Actual source code: tssen.c

petsc-master 2019-06-15
  1:  #include <petsc/private/tsimpl.h>
  2:  #include <petscdraw.h>

  4: PetscLogEvent TS_AdjointStep,TS_ForwardStep,TS_JacobianPEval;

  6: /* ------------------------ Sensitivity Context ---------------------------*/

  8: /*@C
  9:   TSSetRHSJacobianP - Sets the function that computes the Jacobian of G w.r.t. the parameters P where U_t = G(U,P,t), as well as the location to store the matrix.

 11:   Logically Collective on TS

 13:   Input Parameters:
 14: + ts - TS context obtained from TSCreate()
 15: . Amat - JacobianP matrix
 16: . func - function
 17: - ctx - [optional] user-defined function context

 19:   Calling sequence of func:
 20: $ func (TS ts,PetscReal t,Vec U,Mat A,void *ctx);
 21: +   t - current timestep
 22: .   U - input vector (current ODE solution)
 23: .   A - output matrix
 24: -   ctx - [optional] user-defined function context

 26:   Level: intermediate

 28:   Notes:
 29:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

 31: .seealso: TSGetRHSJacobianP()
 32: @*/
 33: PetscErrorCode TSSetRHSJacobianP(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Mat,void*),void *ctx)
 34: {


 41:   ts->rhsjacobianp    = func;
 42:   ts->rhsjacobianpctx = ctx;
 43:   if (Amat) {
 44:     PetscObjectReference((PetscObject)Amat);
 45:     MatDestroy(&ts->Jacprhs);
 46:     ts->Jacprhs = Amat;
 47:   }
 48:   return(0);
 49: }
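/*
   Usage sketch (not part of tssen.c; guarded out because it references hypothetical
   user code): create a dense n x p JacobianP matrix and attach a user callback with
   TSSetRHSJacobianP(). RHSJacobianP() is a placeholder for an application routine
   matching the calling sequence above; n is the local number of state variables and
   p the number of parameters.
*/
#if 0
extern PetscErrorCode RHSJacobianP(TS,PetscReal,Vec,Mat,void*);

static PetscErrorCode AttachRHSJacobianP(TS ts,PetscInt n,PetscInt p,void *appctx)
{
  Mat            Jacp;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* rows follow the layout of the ODE solution vector, one column per parameter */
  ierr = MatCreateDense(PetscObjectComm((PetscObject)ts),n,PETSC_DECIDE,PETSC_DECIDE,p,NULL,&Jacp);CHKERRQ(ierr);
  ierr = TSSetRHSJacobianP(ts,Jacp,RHSJacobianP,appctx);CHKERRQ(ierr);
  ierr = MatDestroy(&Jacp);CHKERRQ(ierr); /* the TS keeps its own reference */
  PetscFunctionReturn(0);
}
#endif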

 51: /*@C
 52:   TSGetRHSJacobianP - Gets the function that computes the Jacobian of G w.r.t. the parameters P where U_t = G(U,P,t), as well as the location to store the matrix.

 54:   Logically Collective on TS

 56:   Input Parameter:
 57: . ts - TS context obtained from TSCreate()

 59:   Output Parameters:
 60: + Amat - JacobianP matrix
 61: . func - function
 62: - ctx - [optional] user-defined function context

 64:   Calling sequence of func:
 65: $ func (TS ts,PetscReal t,Vec U,Mat A,void *ctx);
 66: +   t - current timestep
 67: .   U - input vector (current ODE solution)
 68: .   A - output matrix
 69: -   ctx - [optional] user-defined function context

 71:   Level: intermediate

 73:   Notes:
 74:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

 76: .seealso: TSSetRHSJacobianP()
 77: @*/
 78: PetscErrorCode TSGetRHSJacobianP(TS ts,Mat *Amat,PetscErrorCode (**func)(TS,PetscReal,Vec,Mat,void*),void **ctx)
 79: {
 81:   if (func) *func = ts->rhsjacobianp;
 82:   if (ctx) *ctx  = ts->rhsjacobianpctx;
 83:   if (Amat) *Amat = ts->Jacprhs;
 84:   return(0);
 85: }

 87: /*@C
 88:   TSComputeRHSJacobianP - Runs the user-defined JacobianP function.

 90:   Collective on TS

 92:   Input Parameters:
 93: . ts   - The TS context obtained from TSCreate()

 95:   Level: developer

 97: .seealso: TSSetRHSJacobianP()
 98: @*/
 99: PetscErrorCode TSComputeRHSJacobianP(TS ts,PetscReal t,Vec U,Mat Amat)
100: {

104:   if (!Amat) return(0);

108:   PetscStackPush("TS user JacobianP function for sensitivity analysis");
109:   (*ts->rhsjacobianp)(ts,t,U,Amat,ts->rhsjacobianpctx);
110:   PetscStackPop;
111:   return(0);
112: }

114: /*@C
115:   TSSetIJacobianP - Sets the function that computes the Jacobian of F w.r.t. the parameters P where F(Udot,U,t) = G(U,P,t), as well as the location to store the matrix.

117:   Logically Collective on TS

119:   Input Parameters:
120: + ts - TS context obtained from TSCreate()
121: . Amat - JacobianP matrix
122: . func - function
123: - ctx - [optional] user-defined function context

125:   Calling sequence of func:
126: $ func (TS ts,PetscReal t,Vec U,Vec Udot,PetscReal shift,Mat A,void *ctx);
127: +   t - current timestep
128: .   U - input vector (current ODE solution)
129: .   Udot - time derivative of state vector
130: .   shift - shift to apply; see TSSetIJacobian()
131: .   A - output matrix
132: -   ctx - [optional] user-defined function context

134:   Level: intermediate

136:   Notes:
137:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

139: .seealso:
140: @*/
141: PetscErrorCode TSSetIJacobianP(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Vec,PetscReal,Mat,void*),void *ctx)
142: {


149:   ts->ijacobianp    = func;
150:   ts->ijacobianpctx = ctx;
151:   if (Amat) {
152:     PetscObjectReference((PetscObject)Amat);
153:     MatDestroy(&ts->Jacp);
154:     ts->Jacp = Amat;
155:   }
156:   return(0);
157: }
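/*
   Usage sketch (not part of tssen.c; guarded out): attach an IJacobianP callback for
   an implicit form F(Udot,U,P,t) = 0. IJacobianP() is a hypothetical user routine
   matching the calling sequence documented above; Jacp is assumed to be a
   preallocated matrix with the row layout of U and one column per parameter.
*/
#if 0
extern PetscErrorCode IJacobianP(TS,PetscReal,Vec,Vec,PetscReal,Mat,void*);

static PetscErrorCode AttachIJacobianP(TS ts,Mat Jacp,void *appctx)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TSSetIJacobianP(ts,Jacp,IJacobianP,appctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif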

159: /*@C
160:   TSComputeIJacobianP - Runs the user-defined IJacobianP function.

162:   Collective on TS

164:   Input Parameters:
165: + ts - the TS context
166: . t - current timestep
167: . U - state vector
168: . Udot - time derivative of state vector
169: .  shift - shift to apply; see TSSetIJacobian()
170: - imex - flag indicates if the method is IMEX so that the RHSJacobian should be kept separate

172:   Output Parameters:
173: . A - Jacobian matrix

175:   Level: developer

177: .seealso: TSSetIJacobianP()
178: @*/
179: PetscErrorCode TSComputeIJacobianP(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal shift,Mat Amat,PetscBool imex)
180: {

184:   if (!Amat) return(0);

189:   PetscLogEventBegin(TS_JacobianPEval,ts,U,Amat,0);
190:   if (ts->ijacobianp) {
191:     PetscStackPush("TS user JacobianP function for sensitivity analysis");
192:     (*ts->ijacobianp)(ts,t,U,Udot,shift,Amat,ts->ijacobianpctx);
193:     PetscStackPop;
194:   }
195:   if (imex) {
196:     if (!ts->ijacobianp) {  /* system was written as Udot = G(t,U) */
197:       PetscBool assembled;
198:       MatZeroEntries(Amat);
199:       MatAssembled(Amat,&assembled);
200:       if (!assembled) {
201:         MatAssemblyBegin(Amat,MAT_FINAL_ASSEMBLY);
202:         MatAssemblyEnd(Amat,MAT_FINAL_ASSEMBLY);
203:       }
204:     }
205:   } else {
206:     if (ts->rhsjacobianp) {
207:       TSComputeRHSJacobianP(ts,t,U,ts->Jacprhs);
208:     }
209:     if (ts->Jacprhs == Amat) { /* No IJacobian, so we only have the RHS matrix */
210:       MatScale(Amat,-1);
211:     } else if (ts->Jacprhs) { /* Both IJacobian and RHSJacobian */
212:       MatStructure axpy = DIFFERENT_NONZERO_PATTERN;
213:       if (!ts->ijacobianp) { /* No IJacobianp provided, but we have a separate RHS matrix */
214:         MatZeroEntries(Amat);
215:       }
216:       MatAXPY(Amat,-1,ts->Jacprhs,axpy);
217:     }
218:   }
219:   PetscLogEventEnd(TS_JacobianPEval,ts,U,Amat,0);
220:   return(0);
221: }

223: /*@C
224:     TSSetCostIntegrand - Sets the routine for evaluating the integral term in one or more cost functions

226:     Logically Collective on TS

228:     Input Parameters:
229: +   ts - the TS context obtained from TSCreate()
230: .   numcost - number of gradients to be computed, this is the number of cost functions
231: .   costintegral - vector that stores the integral values
232: .   rf - routine for evaluating the integrand function
233: .   drduf - function that computes the gradients of the r's with respect to u
234: .   drdpf - function that computes the gradients of the r's with respect to p, can be NULL if parametric sensitivity is not desired (mu=NULL)
235: .   fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run
236: -   ctx - [optional] user-defined context for private data for the function evaluation routine (may be NULL)

238:     Calling sequence of rf:
239: $   PetscErrorCode rf(TS ts,PetscReal t,Vec U,Vec F,void *ctx);

241:     Calling sequence of drduf:
242: $   PetscErrorCode drduf(TS ts,PetscReal t,Vec U,Vec *dRdU,void *ctx);

244:     Calling sequence of drdpf:
245: $   PetscErrorCode drdpf(TS ts,PetscReal t,Vec U,Vec *dRdP,void *ctx);

247:     Level: deprecated

249:     Notes:
250:     For optimization there is usually a single cost function (numcost = 1). For sensitivities there may be multiple cost functions

252: .seealso: TSSetRHSJacobianP(), TSGetCostGradients(), TSSetCostGradients()
253: @*/
254: PetscErrorCode TSSetCostIntegrand(TS ts,PetscInt numcost,Vec costintegral,PetscErrorCode (*rf)(TS,PetscReal,Vec,Vec,void*),
255:                                                           PetscErrorCode (*drduf)(TS,PetscReal,Vec,Vec*,void*),
256:                                                           PetscErrorCode (*drdpf)(TS,PetscReal,Vec,Vec*,void*),
257:                                                           PetscBool fwd,void *ctx)
258: {

264:   if (ts->numcost && ts->numcost!=numcost) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"The number of cost functions (second parameter of TSSetCostIntegrand()) is inconsistent with the one set by TSSetCostGradients() or TSForwardSetIntegralGradients()");
265:   if (!ts->numcost) ts->numcost=numcost;

267:   if (costintegral) {
268:     PetscObjectReference((PetscObject)costintegral);
269:     VecDestroy(&ts->vec_costintegral);
270:     ts->vec_costintegral = costintegral;
271:   } else {
272:     if (!ts->vec_costintegral) { /* Create a seq vec if user does not provide one */
273:       VecCreateSeq(PETSC_COMM_SELF,numcost,&ts->vec_costintegral);
274:     } else {
275:       VecSet(ts->vec_costintegral,0.0);
276:     }
277:   }
278:   if (!ts->vec_costintegrand) {
279:     VecDuplicate(ts->vec_costintegral,&ts->vec_costintegrand);
280:   } else {
281:     VecSet(ts->vec_costintegrand,0.0);
282:   }
283:   ts->costintegralfwd  = fwd; /* Evaluate the cost integral in forward run if fwd is true */
284:   ts->costintegrand    = rf;
285:   ts->costintegrandctx = ctx;
286:   ts->drdufunction     = drduf;
287:   ts->drdpfunction     = drdpf;
288:   return(0);
289: }
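/*
   Usage sketch (not part of tssen.c; guarded out): register a single cost integrand
   together with its derivatives, evaluated during the forward run. CostIntegrand(),
   DRDUFunction() and DRDPFunction() are hypothetical user routines matching the
   calling sequences above. Note that this interface is marked deprecated;
   TSCreateQuadratureTS() is the preferred mechanism.
*/
#if 0
extern PetscErrorCode CostIntegrand(TS,PetscReal,Vec,Vec,void*);
extern PetscErrorCode DRDUFunction(TS,PetscReal,Vec,Vec*,void*);
extern PetscErrorCode DRDPFunction(TS,PetscReal,Vec,Vec*,void*);

static PetscErrorCode AttachCostIntegrand(TS ts,void *appctx)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* one cost function; pass NULL so TS creates the internal vector holding the integral */
  ierr = TSSetCostIntegrand(ts,1,NULL,CostIntegrand,DRDUFunction,DRDPFunction,PETSC_TRUE,appctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif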

291: /*@C
292:    TSGetCostIntegral - Returns the values of the integral term in the cost functions.
293:    It is valid to call the routine after a backward run.

295:    Not Collective

297:    Input Parameter:
298: .  ts - the TS context obtained from TSCreate()

300:    Output Parameter:
301: .  v - the vector containing the integrals for each cost function

303:    Level: intermediate

305: .seealso: TSSetCostIntegrand()

307: @*/
308: PetscErrorCode  TSGetCostIntegral(TS ts,Vec *v)
309: {
310:   TS             quadts;

316:   TSGetQuadratureTS(ts,NULL,&quadts);
317:   *v = quadts->vec_sol;
318:   return(0);
319: }

321: /*@C
322:    TSComputeCostIntegrand - Evaluates the integral function in the cost functions.

324:    Input Parameters:
325: +  ts - the TS context
326: .  t - current time
327: -  U - state vector, i.e. current solution

329:    Output Parameter:
330: .  Q - vector of size numcost to hold the outputs

332:    Notes:
333:    Most users should not need to explicitly call this routine, as it
334:    is used internally within the sensitivity analysis context.

336:    Level: deprecated

338: .seealso: TSSetCostIntegrand()
339: @*/
340: PetscErrorCode TSComputeCostIntegrand(TS ts,PetscReal t,Vec U,Vec Q)
341: {


349:   PetscLogEventBegin(TS_FunctionEval,ts,U,Q,0);
350:   if (ts->costintegrand) {
351:     PetscStackPush("TS user integrand in the cost function");
352:     (*ts->costintegrand)(ts,t,U,Q,ts->costintegrandctx);
353:     PetscStackPop;
354:   } else {
355:     VecZeroEntries(Q);
356:   }

358:   PetscLogEventEnd(TS_FunctionEval,ts,U,Q,0);
359:   return(0);
360: }

362: /*@C
363:   TSComputeDRDUFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobian()

365:   Level: deprecated

367: @*/
368: PetscErrorCode TSComputeDRDUFunction(TS ts,PetscReal t,Vec U,Vec *DRDU)
369: {

373:   if (!DRDU) return(0);

377:   PetscStackPush("TS user DRDU function for sensitivity analysis");
378:   (*ts->drdufunction)(ts,t,U,DRDU,ts->costintegrandctx);
379:   PetscStackPop;
380:   return(0);
381: }

383: /*@C
384:   TSComputeDRDPFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobianP()

386:   Level: deprecated

388: @*/
389: PetscErrorCode TSComputeDRDPFunction(TS ts,PetscReal t,Vec U,Vec *DRDP)
390: {

394:   if (!DRDP) return(0);

398:   PetscStackPush("TS user DRDP function for sensitivity analysis");
399:   (*ts->drdpfunction)(ts,t,U,DRDP,ts->costintegrandctx);
400:   PetscStackPop;
401:   return(0);
402: }

404: /*@C
405:   TSSetIHessianProduct - Sets the function that computes the vector-Hessian-vector product. The Hessian is the second-order derivative of F (IFunction) w.r.t. the state variable.

407:   Logically Collective on TS

409:   Input Parameters:
410: + ts - TS context obtained from TSCreate()
411: . ihp1 - an array of vectors storing the result of vector-Hessian-vector product for F_UU
412: . hessianproductfunc1 - vector-Hessian-vector product function for F_UU
413: . ihp2 - an array of vectors storing the result of vector-Hessian-vector product for F_UP
414: . hessianproductfunc2 - vector-Hessian-vector product function for F_UP
415: . ihp3 - an array of vectors storing the result of vector-Hessian-vector product for F_PU
416: . hessianproductfunc3 - vector-Hessian-vector product function for F_PU
417: . ihp4 - an array of vectors storing the result of vector-Hessian-vector product for F_PP
418: . hessianproductfunc4 - vector-Hessian-vector product function for F_PP

420:   Calling sequence of ihessianproductfunc:
421: $ ihessianproductfunc (TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV,void *ctx);
422: +   t - current timestep
423: .   U - input vector (current ODE solution)
424: .   Vl - an array of input vectors to be left-multiplied with the Hessian
425: .   Vr - input vector to be right-multiplied with the Hessian
426: .   VHV - an array of output vectors for vector-Hessian-vector product
427: -   ctx - [optional] user-defined function context

429:   Level: intermediate

431:   Notes:
432:   The first Hessian function and the working array are required.
433:   As an example of how to implement the callback functions, consider the second callback function, which calculates the vector-Hessian-vector product
434:   $ Vl_n^T*F_UP*Vr
435:   where the vector Vl_n (n-th element in the array Vl) and Vr are of size N and M respectively, and the Hessian F_UP is of size N x N x M.
436:   Each entry of F_UP corresponds to the derivative
437:   $ F_UP[i][j][k] = \frac{\partial^2 F[i]}{\partial U[j] \partial P[k]}.
438:   The result of the vector-Hessian-vector product for Vl_n needs to be stored in vector VHV_n with the j-th entry being
439:   $ VHV_n[j] = \sum_i \sum_k {Vl_n[i] * F_UP[i][j][k] * Vr[k]}
440:   If the cost function is a scalar, there will be only one vector in Vl and VHV.

442: .seealso:
443: @*/
444: PetscErrorCode TSSetIHessianProduct(TS ts,Vec *ihp1,PetscErrorCode (*ihessianproductfunc1)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
445:                                           Vec *ihp2,PetscErrorCode (*ihessianproductfunc2)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
446:                                           Vec *ihp3,PetscErrorCode (*ihessianproductfunc3)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
447:                                           Vec *ihp4,PetscErrorCode (*ihessianproductfunc4)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
448:                                     void *ctx)
449: {

454:   ts->ihessianproductctx = ctx;
455:   if (ihp1) ts->vecs_fuu = ihp1;
456:   if (ihp2) ts->vecs_fup = ihp2;
457:   if (ihp3) ts->vecs_fpu = ihp3;
458:   if (ihp4) ts->vecs_fpp = ihp4;
459:   ts->ihessianproduct_fuu = ihessianproductfunc1;
460:   ts->ihessianproduct_fup = ihessianproductfunc2;
461:   ts->ihessianproduct_fpu = ihessianproductfunc3;
462:   ts->ihessianproduct_fpp = ihessianproductfunc4;
463:   return(0);
464: }
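/*
   Usage sketch (not part of tssen.c; guarded out): register the required F_UU
   vector-Hessian-vector product callback for a single cost function (numcost = 1).
   IHessianProductUU() is a hypothetical user routine matching the calling sequence
   above; U is the ODE solution vector and is used only to size the work array.
*/
#if 0
extern PetscErrorCode IHessianProductUU(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*);

static PetscErrorCode AttachIHessianProduct(TS ts,Vec U,void *appctx)
{
  Vec            *ihp1;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* one work vector per cost function, holding the result of Vl_n^T*F_UU*Vr */
  ierr = VecDuplicateVecs(U,1,&ihp1);CHKERRQ(ierr);
  ierr = TSSetIHessianProduct(ts,ihp1,IHessianProductUU,NULL,NULL,NULL,NULL,NULL,NULL,appctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif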

466: /*@C
467:   TSComputeIHessianProductFunctionUU - Runs the user-defined vector-Hessian-vector product function for Fuu.

469:   Collective on TS

471:   Input Parameters:
472: . ts   - The TS context obtained from TSCreate()

474:   Notes:
475:   TSComputeIHessianProductFunctionUU() is typically used for sensitivity implementation,
476:   so most users would not generally call this routine themselves.

478:   Level: developer

480: .seealso: TSSetIHessianProduct()
481: @*/
482: PetscErrorCode TSComputeIHessianProductFunctionUU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
483: {

487:   if (!VHV) return(0);

491:   if (ts->ihessianproduct_fuu) {
492:     PetscStackPush("TS user IHessianProduct function 1 for sensitivity analysis");
493:     (*ts->ihessianproduct_fuu)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
494:     PetscStackPop;
495:   }
496:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
497:   if (ts->rhshessianproduct_guu) {
498:     PetscInt nadj;
499:     TSComputeRHSHessianProductFunctionUU(ts,t,U,Vl,Vr,VHV);
500:     for (nadj=0; nadj<ts->numcost; nadj++) {
501:       VecScale(VHV[nadj],-1);
502:     }
503:   }
504:   return(0);
505: }

507: /*@C
508:   TSComputeIHessianProductFunctionUP - Runs the user-defined vector-Hessian-vector product function for Fup.

510:   Collective on TS

512:   Input Parameters:
513: . ts   - The TS context obtained from TSCreate()

515:   Notes:
516:   TSComputeIHessianProductFunctionUP() is typically used for sensitivity implementation,
517:   so most users would not generally call this routine themselves.

519:   Level: developer

521: .seealso: TSSetIHessianProduct()
522: @*/
523: PetscErrorCode TSComputeIHessianProductFunctionUP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
524: {

528:   if (!VHV) return(0);

532:   if (ts->ihessianproduct_fup) {
533:     PetscStackPush("TS user IHessianProduct function 2 for sensitivity analysis");
534:     (*ts->ihessianproduct_fup)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
535:     PetscStackPop;
536:   }
537:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
538:   if (ts->rhshessianproduct_gup) {
539:     PetscInt nadj;
540:     TSComputeRHSHessianProductFunctionUP(ts,t,U,Vl,Vr,VHV);
541:     for (nadj=0; nadj<ts->numcost; nadj++) {
542:       VecScale(VHV[nadj],-1);
543:     }
544:   }
545:   return(0);
546: }

548: /*@C
549:   TSComputeIHessianProductFunctionPU - Runs the user-defined vector-Hessian-vector product function for Fpu.

551:   Collective on TS

553:   Input Parameters:
554: . ts   - The TS context obtained from TSCreate()

556:   Notes:
557:   TSComputeIHessianProductFunctionPU() is typically used for sensitivity implementation,
558:   so most users would not generally call this routine themselves.

560:   Level: developer

562: .seealso: TSSetIHessianProduct()
563: @*/
564: PetscErrorCode TSComputeIHessianProductFunctionPU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
565: {

569:   if (!VHV) return(0);

573:   if (ts->ihessianproduct_fpu) {
574:     PetscStackPush("TS user IHessianProduct function 3 for sensitivity analysis");
575:     (*ts->ihessianproduct_fpu)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
576:     PetscStackPop;
577:   }
578:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
579:   if (ts->rhshessianproduct_gpu) {
580:     PetscInt nadj;
581:     TSComputeRHSHessianProductFunctionPU(ts,t,U,Vl,Vr,VHV);
582:     for (nadj=0; nadj<ts->numcost; nadj++) {
583:       VecScale(VHV[nadj],-1);
584:     }
585:   }
586:   return(0);
587: }

589: /*@C
590:   TSComputeIHessianProductFunctionPP - Runs the user-defined vector-Hessian-vector product function for Fpp.

592:   Collective on TS

594:   Input Parameters:
595: . ts   - The TS context obtained from TSCreate()

597:   Notes:
598:   TSComputeIHessianProductFunctionPP() is typically used for sensitivity implementation,
599:   so most users would not generally call this routine themselves.

601:   Level: developer

603: .seealso: TSSetIHessianProduct()
604: @*/
605: PetscErrorCode TSComputeIHessianProductFunctionPP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
606: {

610:   if (!VHV) return(0);

614:   if (ts->ihessianproduct_fpp) {
615:     PetscStackPush("TS user IHessianProduct function 4 for sensitivity analysis");
616:     (*ts->ihessianproduct_fpp)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
617:     PetscStackPop;
618:   }
619:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
620:   if (ts->rhshessianproduct_gpp) {
621:     PetscInt nadj;
622:     TSComputeRHSHessianProductFunctionPP(ts,t,U,Vl,Vr,VHV);
623:     for (nadj=0; nadj<ts->numcost; nadj++) {
624:       VecScale(VHV[nadj],-1);
625:     }
626:   }
627:   return(0);
628: }

630: /*@C
631:   TSSetRHSHessianProduct - Sets the function that computes the vector-Hessian-vector product. The Hessian is the second-order derivative of G (RHSFunction) w.r.t. the state variable.

633:   Logically Collective on TS

635:   Input Parameters:
636: + ts - TS context obtained from TSCreate()
637: . rhshp1 - an array of vectors storing the result of vector-Hessian-vector product for G_UU
638: . hessianproductfunc1 - vector-Hessian-vector product function for G_UU
639: . rhshp2 - an array of vectors storing the result of vector-Hessian-vector product for G_UP
640: . hessianproductfunc2 - vector-Hessian-vector product function for G_UP
641: . rhshp3 - an array of vectors storing the result of vector-Hessian-vector product for G_PU
642: . hessianproductfunc3 - vector-Hessian-vector product function for G_PU
643: . rhshp4 - an array of vectors storing the result of vector-Hessian-vector product for G_PP
644: . hessianproductfunc4 - vector-Hessian-vector product function for G_PP

646:   Calling sequence of rhshessianproductfunc:
647: $ rhshessianproductfunc (TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV,void *ctx);
648: +   t - current timestep
649: .   U - input vector (current ODE solution)
650: .   Vl - an array of input vectors to be left-multiplied with the Hessian
651: .   Vr - input vector to be right-multiplied with the Hessian
652: .   VHV - an array of output vectors for vector-Hessian-vector product
653: -   ctx - [optional] user-defined function context

655:   Level: intermediate

657:   Notes:
658:   The first Hessian function and the working array are required.
659:   As an example of how to implement the callback functions, consider the second callback function, which calculates the vector-Hessian-vector product
660:   $ Vl_n^T*G_UP*Vr
661:   where the vector Vl_n (n-th element in the array Vl) and Vr are of size N and M respectively, and the Hessian G_UP is of size N x N x M.
662:   Each entry of G_UP corresponds to the derivative
663:   $ G_UP[i][j][k] = \frac{\partial^2 G[i]}{\partial U[j] \partial P[k]}.
664:   The result of the vector-Hessian-vector product for Vl_n needs to be stored in vector VHV_n with the j-th entry being
665:   $ VHV_n[j] = \sum_i \sum_k {Vl_n[i] * G_UP[i][j][k] * Vr[k]}
666:   If the cost function is a scalar, there will be only one vector in Vl and VHV.

668: .seealso:
669: @*/
670: PetscErrorCode TSSetRHSHessianProduct(TS ts,Vec *rhshp1,PetscErrorCode (*rhshessianproductfunc1)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
671:                                           Vec *rhshp2,PetscErrorCode (*rhshessianproductfunc2)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
672:                                           Vec *rhshp3,PetscErrorCode (*rhshessianproductfunc3)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
673:                                           Vec *rhshp4,PetscErrorCode (*rhshessianproductfunc4)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
674:                                     void *ctx)
675: {

680:   ts->rhshessianproductctx = ctx;
681:   if (rhshp1) ts->vecs_guu = rhshp1;
682:   if (rhshp2) ts->vecs_gup = rhshp2;
683:   if (rhshp3) ts->vecs_gpu = rhshp3;
684:   if (rhshp4) ts->vecs_gpp = rhshp4;
685:   ts->rhshessianproduct_guu = rhshessianproductfunc1;
686:   ts->rhshessianproduct_gup = rhshessianproductfunc2;
687:   ts->rhshessianproduct_gpu = rhshessianproductfunc3;
688:   ts->rhshessianproduct_gpp = rhshessianproductfunc4;
689:   return(0);
690: }
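/*
   Callback sketch (not part of tssen.c; guarded out): a hypothetical RHSHessianProduct
   callback for G_UU with a single cost function and the toy right-hand side
   G[0] = u0*u0, G[1] = u0*u1, so that following the formula documented above
     VHV[0][0] = 2*Vl[0][0]*Vr[0] + Vl[0][1]*Vr[1]
     VHV[0][1] = Vl[0][1]*Vr[0]
   It matches the calling sequence of rhshessianproductfunc.
*/
#if 0
static PetscErrorCode RHSHessianProductUU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV,void *ctx)
{
  const PetscScalar *vl,*vr;
  PetscScalar       *vhv;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = VecGetArrayRead(Vl[0],&vl);CHKERRQ(ierr);
  ierr = VecGetArrayRead(Vr,&vr);CHKERRQ(ierr);
  ierr = VecGetArray(VHV[0],&vhv);CHKERRQ(ierr);
  /* contract the constant Hessian of the toy G with Vl[0] and Vr */
  vhv[0] = 2.0*vl[0]*vr[0] + vl[1]*vr[1];
  vhv[1] = vl[1]*vr[0];
  ierr = VecRestoreArray(VHV[0],&vhv);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(Vr,&vr);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(Vl[0],&vl);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif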

692: /*@C
693:   TSComputeRHSHessianProductFunctionUU - Runs the user-defined vector-Hessian-vector product function for Guu.

695:   Collective on TS

697:   Input Parameters:
698: . ts   - The TS context obtained from TSCreate()

700:   Notes:
701:   TSComputeRHSHessianProductFunctionUU() is typically used for sensitivity implementation,
702:   so most users would not generally call this routine themselves.

704:   Level: developer

706: .seealso: TSSetRHSHessianProduct()
707: @*/
708: PetscErrorCode TSComputeRHSHessianProductFunctionUU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
709: {

713:   if (!VHV) return(0);

717:   PetscStackPush("TS user RHSHessianProduct function 1 for sensitivity analysis");
718:   (*ts->rhshessianproduct_guu)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
719:   PetscStackPop;
720:   return(0);
721: }

723: /*@C
724:   TSComputeRHSHessianProductFunctionUP - Runs the user-defined vector-Hessian-vector product function for Gup.

726:   Collective on TS

728:   Input Parameters:
729: . ts   - The TS context obtained from TSCreate()

731:   Notes:
732:   TSComputeRHSHessianProductFunctionUP() is typically used for sensitivity implementation,
733:   so most users would not generally call this routine themselves.

735:   Level: developer

737: .seealso: TSSetRHSHessianProduct()
738: @*/
739: PetscErrorCode TSComputeRHSHessianProductFunctionUP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
740: {

744:   if (!VHV) return(0);

748:   PetscStackPush("TS user RHSHessianProduct function 2 for sensitivity analysis");
749:   (*ts->rhshessianproduct_gup)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
750:   PetscStackPop;
751:   return(0);
752: }

754: /*@C
755:   TSComputeRHSHessianProductFunctionPU - Runs the user-defined vector-Hessian-vector product function for Gpu.

757:   Collective on TS

759:   Input Parameters:
760: . ts   - The TS context obtained from TSCreate()

762:   Notes:
763:   TSComputeRHSHessianProductFunctionPU() is typically used for sensitivity implementation,
764:   so most users would not generally call this routine themselves.

766:   Level: developer

768: .seealso: TSSetRHSHessianProduct()
769: @*/
770: PetscErrorCode TSComputeRHSHessianProductFunctionPU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
771: {

775:   if (!VHV) return(0);

779:   PetscStackPush("TS user RHSHessianProduct function 3 for sensitivity analysis");
780:   (*ts->rhshessianproduct_gpu)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
781:   PetscStackPop;
782:   return(0);
783: }

785: /*@C
786:   TSComputeRHSHessianProductFunctionPP - Runs the user-defined vector-Hessian-vector product function for Gpp.

788:   Collective on TS

790:   Input Parameters:
791: . ts   - The TS context obtained from TSCreate()

793:   Notes:
794:   TSComputeRHSHessianProductFunctionPP() is typically used for sensitivity implementation,
795:   so most users would not generally call this routine themselves.

797:   Level: developer

799: .seealso: TSSetRHSHessianProduct()
800: @*/
801: PetscErrorCode TSComputeRHSHessianProductFunctionPP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
802: {

806:   if (!VHV) return(0);

810:   PetscStackPush("TS user RHSHessianProduct function 4 for sensitivity analysis");
811:   (*ts->rhshessianproduct_gpp)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
812:   PetscStackPop;
813:   return(0);
814: }

816: /* --------------------------- Adjoint sensitivity ---------------------------*/

818: /*@
819:    TSSetCostGradients - Sets the initial value of the gradients of the cost function w.r.t. initial values and w.r.t. the problem parameters
820:       for use by the TSAdjoint routines.

822:    Logically Collective on TS

824:    Input Parameters:
825: +  ts - the TS context obtained from TSCreate()
826: .  lambda - gradients with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
827: -  mu - gradients with respect to the parameters, the number of entries in these vectors is the same as the number of parameters

829:    Level: beginner

831:    Notes:
832:     the entries in these vectors must be correctly initialized with the values lambda_i = df/dy|finaltime and mu_i = df/dp|finaltime

834:    After TSAdjointSolve() is called, lambda and mu contain the computed sensitivities

836: .seealso: TSGetCostGradients()
837: @*/
838: PetscErrorCode TSSetCostGradients(TS ts,PetscInt numcost,Vec *lambda,Vec *mu)
839: {
843:   ts->vecs_sensi  = lambda;
844:   ts->vecs_sensip = mu;
845:   if (ts->numcost && ts->numcost!=numcost) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"The number of cost functions (second parameter of TSSetCostIntegrand()) is inconsistent with the one set by TSSetCostIntegrand");
846:   ts->numcost  = numcost;
847:   return(0);
848: }
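/*
   Usage sketch (not part of tssen.c; guarded out): allocate and initialize the adjoint
   variables for a single scalar cost functional psi(U(tf)), then register them before
   TSAdjointSolve(). The seed values dpsi/dU and dpsi/dP at the final time are
   application specific and only indicated here; p is the number of parameters.
*/
#if 0
static PetscErrorCode SetupCostGradients(TS ts,Vec U,PetscInt p,Vec lambda[1],Vec mu[1])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecDuplicate(U,&lambda[0]);CHKERRQ(ierr);              /* same layout as the ODE solution */
  ierr = VecCreateSeq(PETSC_COMM_SELF,p,&mu[0]);CHKERRQ(ierr);  /* one entry per parameter */
  /* initialize with dpsi/dU|finaltime and dpsi/dP|finaltime here, e.g. via VecSetValues() */
  ierr = TSSetCostGradients(ts,1,lambda,mu);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif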

850: /*@
851:    TSGetCostGradients - Returns the gradients from the TSAdjointSolve()

853:    Not Collective, but Vec returned is parallel if TS is parallel

855:    Input Parameter:
856: .  ts - the TS context obtained from TSCreate()

858:    Output Parameters:
859: +  lambda - vectors containing the gradients of the cost functions with respect to the ODE/DAE solution variables
860: -  mu - vectors containing the gradients of the cost functions with respect to the problem parameters

862:    Level: intermediate

864: .seealso: TSSetCostGradients()
865: @*/
866: PetscErrorCode TSGetCostGradients(TS ts,PetscInt *numcost,Vec **lambda,Vec **mu)
867: {
870:   if (numcost) *numcost = ts->numcost;
871:   if (lambda)  *lambda  = ts->vecs_sensi;
872:   if (mu)      *mu      = ts->vecs_sensip;
873:   return(0);
874: }

876: /*@
877:    TSSetCostHessianProducts - Sets the initial value of the Hessian-vector products of the cost function w.r.t. initial values and w.r.t. the problem parameters
878:       for use by the TSAdjoint routines.

880:    Logically Collective on TS

882:    Input Parameters:
883: +  ts - the TS context obtained from TSCreate()
884: .  numcost - number of cost functions
885: .  lambda2 - Hessian-vector product with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
886: .  mu2 - Hessian-vector product with respect to the parameters, the number of entries in these vectors is the same as the number of parameters
887: -  dir - the direction vector that is multiplied with the Hessian of the cost functions

889:    Level: beginner

891:    Notes: The Hessian of the cost function is completely different from the Hessian of the ODE/DAE system

893:    For second-order adjoint, one needs to call this function and then TSAdjointSetForward() before TSSolve().

895:    After TSAdjointSolve() is called, lambda2 and mu2 will contain the computed second-order adjoint sensitivities, which can be used to produce the Hessian-vector product (not the full Hessian matrix). Users must provide a direction vector; it is usually generated by an optimization solver.

897:    Passing NULL for lambda2 disables the second-order calculation.
898: .seealso: TSAdjointSetForward()
899: @*/
900: PetscErrorCode TSSetCostHessianProducts(TS ts,PetscInt numcost,Vec *lambda2,Vec *mu2,Vec dir)
901: {
904:   if (ts->numcost && ts->numcost!=numcost) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"The number of cost functions (second parameter of TSSetCostIntegrand()) is inconsistent with the one set by TSSetCostIntegrand");
905:   ts->numcost       = numcost;
906:   ts->vecs_sensi2   = lambda2;
907:   ts->vecs_sensi2p  = mu2;
908:   ts->vec_dir       = dir;
909:   return(0);
910: }

912: /*@
913:    TSGetCostHessianProducts - Returns the Hessian-vector products from the TSAdjointSolve()

915:    Not Collective, but Vec returned is parallel if TS is parallel

917:    Input Parameter:
918: .  ts - the TS context obtained from TSCreate()

920:    Output Parameters:
921: +  numcost - number of cost functions
922: .  lambda2 - Hessian-vector product with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
923: .  mu2 - Hessian-vector product with respect to the parameters, the number of entries in these vectors is the same as the number of parameters
924: -  dir - the direction vector that is multiplied with the Hessian of the cost functions

926:    Level: intermediate

928: .seealso: TSSetCostHessianProducts()
929: @*/
930: PetscErrorCode TSGetCostHessianProducts(TS ts,PetscInt *numcost,Vec **lambda2,Vec **mu2, Vec *dir)
931: {
934:   if (numcost) *numcost = ts->numcost;
935:   if (lambda2) *lambda2 = ts->vecs_sensi2;
936:   if (mu2)     *mu2     = ts->vecs_sensi2p;
937:   if (dir)     *dir     = ts->vec_dir;
938:   return(0);
939: }

941: /*@
942:   TSAdjointSetForward - Trigger the tangent linear solver and initialize the forward sensitivities

944:   Logically Collective on TS

946:   Input Parameters:
947: +  ts - the TS context obtained from TSCreate()
948: -  didp - the derivative of initial values w.r.t. parameters

950:   Level: intermediate

952:   Notes: When computing sensitivities w.r.t. the initial condition, set didp to NULL so that the solver treats it mathematically as an identity matrix. TSAdjoint does not reset the tangent linear solver automatically; TSAdjointResetForward() should be called to reset it.

954: .seealso: TSSetCostHessianProducts(), TSAdjointResetForward()
955: @*/
956: PetscErrorCode TSAdjointSetForward(TS ts,Mat didp)
957: {
958:   Mat            A;
959:   Vec            sp;
960:   PetscScalar    *xarr;
961:   PetscInt       lsize;

965:   ts->forward_solve = PETSC_TRUE; /* turn on tangent linear mode */
966:   if (!ts->vecs_sensi2) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"Must call TSSetCostHessianProducts() first");
967:   if (!ts->vec_dir) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"Directional vector is missing. Call TSSetCostHessianProducts() to set it.");
968:   /* create a single-column dense matrix */
969:   VecGetLocalSize(ts->vec_sol,&lsize);
970:   MatCreateDense(PetscObjectComm((PetscObject)ts),lsize,PETSC_DECIDE,PETSC_DECIDE,1,NULL,&A);

972:   VecDuplicate(ts->vec_sol,&sp);
973:   MatDenseGetColumn(A,0,&xarr);
974:   VecPlaceArray(sp,xarr);
975:   if (ts->vecs_sensi2p) { /* tangent linear variable initialized as 2*dIdP*dir */
976:     if (didp) {
977:       MatMult(didp,ts->vec_dir,sp);
978:       VecScale(sp,2.);
979:     } else {
980:       VecZeroEntries(sp);
981:     }
982:   } else { /* tangent linear variable initialized as dir */
983:     VecCopy(ts->vec_dir,sp);
984:   }
985:   VecResetArray(sp);
986:   MatDenseRestoreColumn(A,&xarr);
987:   VecDestroy(&sp);

989:   TSForwardSetInitialSensitivities(ts,A); /* if didp is NULL, identity matrix is assumed */

991:   MatDestroy(&A);
992:   return(0);
993: }
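/*
   Workflow sketch (not part of tssen.c; guarded out): the order of calls for a
   Hessian-vector product with the second-order adjoint, assuming lambda2, mu2 and the
   direction vector dir were created beforehand and that the cost gradients and the
   Hessian-product callbacks were registered elsewhere.
*/
#if 0
static PetscErrorCode HessianVectorProduct(TS ts,Vec U0,PetscInt numcost,Vec *lambda2,Vec *mu2,Vec dir)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TSSetCostHessianProducts(ts,numcost,lambda2,mu2,dir);CHKERRQ(ierr);
  ierr = TSAdjointSetForward(ts,NULL);CHKERRQ(ierr);  /* NULL didp: sensitivities w.r.t. initial conditions */
  ierr = TSSetSaveTrajectory(ts);CHKERRQ(ierr);
  ierr = TSSolve(ts,U0);CHKERRQ(ierr);                /* forward run with the tangent linear solver enabled */
  ierr = TSAdjointSolve(ts);CHKERRQ(ierr);            /* lambda2/mu2 now hold the Hessian-vector products */
  ierr = TSAdjointResetForward(ts);CHKERRQ(ierr);     /* reset the tangent linear solver for the next direction */
  PetscFunctionReturn(0);
}
#endif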

995: /*@
996:   TSAdjointResetForward - Reset the tangent linear solver and destroy the tangent linear context

998:   Logically Collective on TS

1000:   Input Parameter:
1001: .  ts - the TS context obtained from TSCreate()

1003:   Level: intermediate

1005: .seealso: TSAdjointSetForward()
1006: @*/
1007: PetscErrorCode TSAdjointResetForward(TS ts)
1008: {

1012:   ts->forward_solve = PETSC_FALSE; /* turn off tangent linear mode */
1013:   TSForwardReset(ts);
1014:   return(0);
1015: }

1017: /*@
1018:    TSAdjointSetUp - Sets up the internal data structures for the later use
1019:    of an adjoint solver

1021:    Collective on TS

1023:    Input Parameter:
1024: .  ts - the TS context obtained from TSCreate()

1026:    Level: advanced

1028: .seealso: TSCreate(), TSAdjointStep(), TSSetCostGradients()
1029: @*/
1030: PetscErrorCode TSAdjointSetUp(TS ts)
1031: {
1032:   TSTrajectory     tj;
1033:   PetscBool        match;
1034:   PetscErrorCode   ierr;

1038:   if (ts->adjointsetupcalled) return(0);
1039:   if (!ts->vecs_sensi) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_WRONGSTATE,"Must call TSSetCostGradients() first");
1040:   if (ts->vecs_sensip && !ts->Jacp && !ts->Jacprhs) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_WRONGSTATE,"Must call TSSetRHSJacobianP() or TSSetIJacobianP() first");
1041:   TSGetTrajectory(ts,&tj);
1042:   PetscObjectTypeCompare((PetscObject)tj,TSTRAJECTORYBASIC,&match);
1043:   if (match) {
1044:     PetscBool solution_only;
1045:     TSTrajectoryGetSolutionOnly(tj,&solution_only);
1046:     if (solution_only) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"TSAdjoint cannot use the solution-only mode when choosing the Basic TSTrajectory type. Turn it off with -ts_trajectory_solution_only 0");
1047:   }
1048:   TSTrajectorySetUseHistory(tj,PETSC_FALSE); /* not use TSHistory */

1050:   if (ts->quadraturets) { /* if there is integral in the cost function */
1051:     VecDuplicate(ts->vecs_sensi[0],&ts->vec_drdu_col);
1052:     if (ts->vecs_sensip){
1053:       VecDuplicate(ts->vecs_sensip[0],&ts->vec_drdp_col);
1054:     }
1055:   }

1057:   if (ts->ops->adjointsetup) {
1058:     (*ts->ops->adjointsetup)(ts);
1059:   }
1060:   ts->adjointsetupcalled = PETSC_TRUE;
1061:   return(0);
1062: }

1064: /*@
1065:    TSAdjointReset - Resets a TSAdjoint context and removes any allocated Vecs and Mats.

1067:    Collective on TS

1069:    Input Parameter:
1070: .  ts - the TS context obtained from TSCreate()

1072:    Level: beginner

1074: .seealso: TSCreate(), TSAdjointSetUp(), TSDestroy()
1075: @*/
1076: PetscErrorCode TSAdjointReset(TS ts)
1077: {

1082:   if (ts->ops->adjointreset) {
1083:     (*ts->ops->adjointreset)(ts);
1084:   }
1085:   if (ts->quadraturets) { /* if there is integral in the cost function */
1086:     VecDestroy(&ts->vec_drdu_col);
1087:     if (ts->vecs_sensip){
1088:       VecDestroy(&ts->vec_drdp_col);
1089:     }
1090:   }
1091:   ts->vecs_sensi         = NULL;
1092:   ts->vecs_sensip        = NULL;
1093:   ts->vecs_sensi2        = NULL;
1094:   ts->vecs_sensi2p       = NULL;
1095:   ts->vec_dir            = NULL;
1096:   ts->adjointsetupcalled = PETSC_FALSE;
1097:   return(0);
1098: }

1100: /*@
1101:    TSAdjointSetSteps - Sets the number of steps the adjoint solver should take backward in time

1103:    Logically Collective on TS

1105:    Input Parameters:
1106: +  ts - the TS context obtained from TSCreate()
1107: .  steps - number of steps to use

1109:    Level: intermediate

1111:    Notes:
1112:     Normally one does not call this; TSAdjointSolve() integrates back to the original timestep. One can call this
1113:           to integrate back over fewer steps than were taken in the forward run

1115: .seealso: TSSetExactFinalTime()
1116: @*/
1117: PetscErrorCode TSAdjointSetSteps(TS ts,PetscInt steps)
1118: {
1122:   if (steps < 0) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_OUTOFRANGE,"Cannot step back a negative number of steps");
1123:   if (steps > ts->steps) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_OUTOFRANGE,"Cannot step back more than the total number of forward steps");
1124:   ts->adjoint_max_steps = steps;
1125:   return(0);
1126: }

1128: /*@C
1129:   TSAdjointSetRHSJacobian - Deprecated, use TSSetRHSJacobianP()

1131:   Level: deprecated

1133: @*/
1134: PetscErrorCode TSAdjointSetRHSJacobian(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Mat,void*),void *ctx)
1135: {


1142:   ts->rhsjacobianp    = func;
1143:   ts->rhsjacobianpctx = ctx;
1144:   if (Amat) {
1145:     PetscObjectReference((PetscObject)Amat);
1146:     MatDestroy(&ts->Jacp);
1147:     ts->Jacp = Amat;
1148:   }
1149:   return(0);
1150: }

1152: /*@C
1153:   TSAdjointComputeRHSJacobian - Deprecated, use TSComputeRHSJacobianP()

1155:   Level: deprecated

1157: @*/
1158: PetscErrorCode TSAdjointComputeRHSJacobian(TS ts,PetscReal t,Vec U,Mat Amat)
1159: {


1167:   PetscStackPush("TS user JacobianP function for sensitivity analysis");
1168:   (*ts->rhsjacobianp)(ts,t,U,Amat,ts->rhsjacobianpctx);
1169:   PetscStackPop;
1170:   return(0);
1171: }

1173: /*@
1174:   TSAdjointComputeDRDYFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobian()

1176:   Level: deprecated

1178: @*/
1179: PetscErrorCode TSAdjointComputeDRDYFunction(TS ts,PetscReal t,Vec U,Vec *DRDU)
1180: {


1187:   PetscStackPush("TS user DRDY function for sensitivity analysis");
1188:   (*ts->drdufunction)(ts,t,U,DRDU,ts->costintegrandctx);
1189:   PetscStackPop;
1190:   return(0);
1191: }

1193: /*@
1194:   TSAdjointComputeDRDPFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobianP()

1196:   Level: deprecated

1198: @*/
1199: PetscErrorCode TSAdjointComputeDRDPFunction(TS ts,PetscReal t,Vec U,Vec *DRDP)
1200: {


1207:   PetscStackPush("TS user DRDP function for sensitivity analysis");
1208:   (*ts->drdpfunction)(ts,t,U,DRDP,ts->costintegrandctx);
1209:   PetscStackPop;
1210:   return(0);
1211: }

1213: /*@C
1214:    TSAdjointMonitorSensi - Monitors the first lambda sensitivity

1216:    Level: intermediate

1218: .seealso: TSAdjointMonitorSet()
1219: @*/
1220: PetscErrorCode TSAdjointMonitorSensi(TS ts,PetscInt step,PetscReal ptime,Vec v,PetscInt numcost,Vec *lambda,Vec *mu,PetscViewerAndFormat *vf)
1221: {
1223:   PetscViewer    viewer = vf->viewer;

1227:   PetscViewerPushFormat(viewer,vf->format);
1228:   VecView(lambda[0],viewer);
1229:   PetscViewerPopFormat(viewer);
1230:   return(0);
1231: }

1233: /*@C
1234:    TSAdjointMonitorSetFromOptions - Sets a monitor function and viewer appropriate for the type indicated by the user

1236:    Collective on TS

1238:    Input Parameters:
1239: +  ts - TS object you wish to monitor
1240: .  name - the monitor type one is seeking
1241: .  help - message indicating what monitoring is done
1242: .  manual - manual page for the monitor
1243: .  monitor - the monitor function
1244: -  monitorsetup - a function that is called once, ONLY if the user selected this monitor, and that may set additional features of the TS or PetscViewer objects

1246:    Level: developer

1248: .seealso: PetscOptionsGetViewer(), PetscOptionsGetReal(), PetscOptionsHasName(), PetscOptionsGetString(),
1249:           PetscOptionsGetIntArray(), PetscOptionsGetRealArray(), PetscOptionsBool()
1250:           PetscOptionsInt(), PetscOptionsString(), PetscOptionsReal(), PetscOptionsBool(),
1251:           PetscOptionsName(), PetscOptionsBegin(), PetscOptionsEnd(), PetscOptionsHead(),
1252:           PetscOptionsStringArray(),PetscOptionsRealArray(), PetscOptionsScalar(),
1253:           PetscOptionsBoolGroupBegin(), PetscOptionsBoolGroup(), PetscOptionsBoolGroupEnd(),
1254:           PetscOptionsFList(), PetscOptionsEList()
1255: @*/
1256: PetscErrorCode TSAdjointMonitorSetFromOptions(TS ts,const char name[],const char help[], const char manual[],PetscErrorCode (*monitor)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,PetscViewerAndFormat*),PetscErrorCode (*monitorsetup)(TS,PetscViewerAndFormat*))
1257: {
1258:   PetscErrorCode    ierr;
1259:   PetscViewer       viewer;
1260:   PetscViewerFormat format;
1261:   PetscBool         flg;

1264:   PetscOptionsGetViewer(PetscObjectComm((PetscObject)ts),((PetscObject) ts)->options,((PetscObject)ts)->prefix,name,&viewer,&format,&flg);
1265:   if (flg) {
1266:     PetscViewerAndFormat *vf;
1267:     PetscViewerAndFormatCreate(viewer,format,&vf);
1268:     PetscObjectDereference((PetscObject)viewer);
1269:     if (monitorsetup) {
1270:       (*monitorsetup)(ts,vf);
1271:     }
1272:     TSAdjointMonitorSet(ts,(PetscErrorCode (*)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,void*))monitor,vf,(PetscErrorCode (*)(void**))PetscViewerAndFormatDestroy);
1273:   }
1274:   return(0);
1275: }

1277: /*@C
1278:    TSAdjointMonitorSet - Sets an ADDITIONAL function that is to be used at every
1279:    timestep to display the iteration's progress.

1281:    Logically Collective on TS

1283:    Input Parameters:
1284: +  ts - the TS context obtained from TSCreate()
1285: .  adjointmonitor - monitoring routine
1286: .  adjointmctx - [optional] user-defined context for private data for the
1287:              monitor routine (use NULL if no context is desired)
1288: -  adjointmonitordestroy - [optional] routine that frees monitor context
1289:           (may be NULL)

1291:    Calling sequence of monitor:
1292: $    int adjointmonitor(TS ts,PetscInt steps,PetscReal time,Vec u,PetscInt numcost,Vec *lambda, Vec *mu,void *adjointmctx)

1294: +    ts - the TS context
1295: .    steps - iteration number (after the final time step the monitor routine is called with a step of -1; this is at the final time, which may have
1296:                                been interpolated to)
1297: .    time - current time
1298: .    u - current iterate
1299: .    numcost - number of cost functions
1300: .    lambda - sensitivities to initial conditions
1301: .    mu - sensitivities to parameters
1302: -    adjointmctx - [optional] adjoint monitoring context

1304:    Notes:
1305:    This routine adds an additional monitor to the list of monitors that
1306:    already has been loaded.

1308:    Fortran Notes:
1309:     Only a single monitor function can be set for each TS object

1311:    Level: intermediate

1313: .seealso: TSAdjointMonitorCancel()
1314: @*/
1315: PetscErrorCode TSAdjointMonitorSet(TS ts,PetscErrorCode (*adjointmonitor)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,void*),void *adjointmctx,PetscErrorCode (*adjointmdestroy)(void**))
1316: {
1318:   PetscInt       i;
1319:   PetscBool      identical;

1323:   for (i=0; i<ts->numberadjointmonitors;i++) {
1324:     PetscMonitorCompare((PetscErrorCode (*)(void))adjointmonitor,adjointmctx,adjointmdestroy,(PetscErrorCode (*)(void))ts->adjointmonitor[i],ts->adjointmonitorcontext[i],ts->adjointmonitordestroy[i],&identical);
1325:     if (identical) return(0);
1326:   }
1327:   if (ts->numberadjointmonitors >= MAXTSMONITORS) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Too many adjoint monitors set");
1328:   ts->adjointmonitor[ts->numberadjointmonitors]          = adjointmonitor;
1329:   ts->adjointmonitordestroy[ts->numberadjointmonitors]   = adjointmdestroy;
1330:   ts->adjointmonitorcontext[ts->numberadjointmonitors++] = (void*)adjointmctx;
1331:   return(0);
1332: }
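/*
   Monitor sketch (not part of tssen.c; guarded out): a hypothetical adjoint monitor
   that prints the norm of the first initial-condition sensitivity at every backward
   step, installed with TSAdjointMonitorSet().
*/
#if 0
static PetscErrorCode MyAdjointMonitor(TS ts,PetscInt step,PetscReal time,Vec u,PetscInt numcost,Vec *lambda,Vec *mu,void *mctx)
{
  PetscReal      norm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecNorm(lambda[0],NORM_2,&norm);CHKERRQ(ierr);
  ierr = PetscPrintf(PetscObjectComm((PetscObject)ts),"adjoint step %D time %g ||lambda[0]|| %g\n",step,(double)time,(double)norm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* installed with: TSAdjointMonitorSet(ts,MyAdjointMonitor,NULL,NULL); */
#endif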

1334: /*@C
1335:    TSAdjointMonitorCancel - Clears all the adjoint monitors that have been set on a time-step object.

1337:    Logically Collective on TS

1339:    Input Parameter:
1340: .  ts - the TS context obtained from TSCreate()

1342:    Notes:
1343:    There is no way to remove a single, specific monitor.

1345:    Level: intermediate

1347: .seealso: TSAdjointMonitorSet()
1348: @*/
1349: PetscErrorCode TSAdjointMonitorCancel(TS ts)
1350: {
1352:   PetscInt       i;

1356:   for (i=0; i<ts->numberadjointmonitors; i++) {
1357:     if (ts->adjointmonitordestroy[i]) {
1358:       (*ts->adjointmonitordestroy[i])(&ts->adjointmonitorcontext[i]);
1359:     }
1360:   }
1361:   ts->numberadjointmonitors = 0;
1362:   return(0);
1363: }

1365: /*@C
1366:    TSAdjointMonitorDefault - the default monitor of adjoint computations

1368:    Level: intermediate

1370: .seealso: TSAdjointMonitorSet()
1371: @*/
1372: PetscErrorCode TSAdjointMonitorDefault(TS ts,PetscInt step,PetscReal ptime,Vec v,PetscInt numcost,Vec *lambda,Vec *mu,PetscViewerAndFormat *vf)
1373: {
1375:   PetscViewer    viewer = vf->viewer;

1379:   PetscViewerPushFormat(viewer,vf->format);
1380:   PetscViewerASCIIAddTab(viewer,((PetscObject)ts)->tablevel);
1381:   PetscViewerASCIIPrintf(viewer,"%D TS dt %g time %g%s",step,(double)ts->time_step,(double)ptime,ts->steprollback ? " (r)\n" : "\n");
1382:   PetscViewerASCIISubtractTab(viewer,((PetscObject)ts)->tablevel);
1383:   PetscViewerPopFormat(viewer);
1384:   return(0);
1385: }

1387: /*@C
1388:    TSAdjointMonitorDrawSensi - Monitors progress of the adjoint TS solvers by calling
1389:    VecView() for the sensitivities to initial states at each timestep

1391:    Collective on TS

1393:    Input Parameters:
1394: +  ts - the TS context
1395: .  step - current time-step
1396: .  ptime - current time
1397: .  u - current state
1398: .  numcost - number of cost functions
1399: .  lambda - sensitivities to initial conditions
1400: .  mu - sensitivities to parameters
1401: -  dummy - either a viewer or NULL

1403:    Level: intermediate

1405: .seealso: TSAdjointMonitorSet(), TSAdjointMonitorDefault(), VecView()
1406: @*/
1407: PetscErrorCode TSAdjointMonitorDrawSensi(TS ts,PetscInt step,PetscReal ptime,Vec u,PetscInt numcost,Vec *lambda,Vec *mu,void *dummy)
1408: {
1409:   PetscErrorCode   ierr;
1410:   TSMonitorDrawCtx ictx = (TSMonitorDrawCtx)dummy;
1411:   PetscDraw        draw;
1412:   PetscReal        xl,yl,xr,yr,h;
1413:   char             time[32];

1416:   if (!(((ictx->howoften > 0) && (!(step % ictx->howoften))) || ((ictx->howoften == -1) && ts->reason))) return(0);

1418:   VecView(lambda[0],ictx->viewer);
1419:   PetscViewerDrawGetDraw(ictx->viewer,0,&draw);
1420:   PetscSNPrintf(time,32,"Timestep %d Time %g",(int)step,(double)ptime);
1421:   PetscDrawGetCoordinates(draw,&xl,&yl,&xr,&yr);
1422:   h    = yl + .95*(yr - yl);
1423:   PetscDrawStringCentered(draw,.5*(xl+xr),h,PETSC_DRAW_BLACK,time);
1424:   PetscDrawFlush(draw);
1425:   return(0);
1426: }

1428: /*
1429:    TSAdjointSetFromOptions - Sets various TSAdjoint parameters from user options.

1431:    Collective on TSAdjoint

1433:    Input Parameter:
1434: .  ts - the TS context

1436:    Options Database Keys:
1437: +  -ts_adjoint_solve <yes,no> After solving the ODE/DAE solve the adjoint problem (requires -ts_save_trajectory)
1438: .  -ts_adjoint_monitor - print information at each adjoint time step
1439: -  -ts_adjoint_monitor_draw_sensi - monitor the sensitivity of the first cost function wrt initial conditions (lambda[0]) graphically

1441:    Level: developer

1443:    Notes:
1444:     This is not normally called directly by users

1446: .seealso: TSSetSaveTrajectory(), TSTrajectorySetUp()
1447: */
1448: PetscErrorCode TSAdjointSetFromOptions(PetscOptionItems *PetscOptionsObject,TS ts)
1449: {
1450:   PetscBool      tflg,opt;

1455:   PetscOptionsHead(PetscOptionsObject,"TS Adjoint options");
1456:   tflg = ts->adjoint_solve ? PETSC_TRUE : PETSC_FALSE;
1457:   PetscOptionsBool("-ts_adjoint_solve","Solve the adjoint problem immediately after solving the forward problem","",tflg,&tflg,&opt);
1458:   if (opt) {
1459:     TSSetSaveTrajectory(ts);
1460:     ts->adjoint_solve = tflg;
1461:   }
1462:   TSAdjointMonitorSetFromOptions(ts,"-ts_adjoint_monitor","Monitor adjoint timestep size","TSAdjointMonitorDefault",TSAdjointMonitorDefault,NULL);
1463:   TSAdjointMonitorSetFromOptions(ts,"-ts_adjoint_monitor_sensi","Monitor sensitivity in the adjoint computation","TSAdjointMonitorSensi",TSAdjointMonitorSensi,NULL);
1464:   opt  = PETSC_FALSE;
1465:   PetscOptionsName("-ts_adjoint_monitor_draw_sensi","Monitor adjoint sensitivities (lambda only) graphically","TSAdjointMonitorDrawSensi",&opt);
1466:   if (opt) {
1467:     TSMonitorDrawCtx ctx;
1468:     PetscInt         howoften = 1;

1470:     PetscOptionsInt("-ts_adjoint_monitor_draw_sensi","Monitor adjoint sensitivities (lambda only) graphically","TSAdjointMonitorDrawSensi",howoften,&howoften,NULL);
1471:     TSMonitorDrawCtxCreate(PetscObjectComm((PetscObject)ts),0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&ctx);
1472:     TSAdjointMonitorSet(ts,TSAdjointMonitorDrawSensi,ctx,(PetscErrorCode (*)(void**))TSMonitorDrawCtxDestroy);
1473:   }
1474:   return(0);
1475: }

1477: /*@
1478:    TSAdjointStep - Steps one time step backward in the adjoint run

1480:    Collective on TS

1482:    Input Parameter:
1483: .  ts - the TS context obtained from TSCreate()

1485:    Level: intermediate

1487: .seealso: TSAdjointSetUp(), TSAdjointSolve()
1488: @*/
1489: PetscErrorCode TSAdjointStep(TS ts)
1490: {
1491:   DM               dm;
1492:   PetscErrorCode   ierr;

1496:   TSGetDM(ts,&dm);
1497:   TSAdjointSetUp(ts);

1499:   ts->reason = TS_CONVERGED_ITERATING;
1500:   ts->ptime_prev = ts->ptime;
1501:   if (!ts->ops->adjointstep) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_NOT_CONVERGED,"TSAdjointStep has failed because the adjoint of %s has not been implemented; try other time stepping methods for adjoint sensitivity analysis",((PetscObject)ts)->type_name);
1502:   PetscLogEventBegin(TS_AdjointStep,ts,0,0,0);
1503:   (*ts->ops->adjointstep)(ts);
1504:   PetscLogEventEnd(TS_AdjointStep,ts,0,0,0);
1505:   ts->adjoint_steps++; ts->steps--;

1507:   if (ts->reason < 0) {
1508:     if (ts->errorifstepfailed) {
1509:       if (ts->reason == TSADJOINT_DIVERGED_LINEAR_SOLVE) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_NOT_CONVERGED,"TSAdjointStep has failed due to %s",TSConvergedReasons[ts->reason]);
1510:       else SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_NOT_CONVERGED,"TSAdjointStep has failed due to %s",TSConvergedReasons[ts->reason]);
1511:     }
1512:   } else if (!ts->reason) {
1513:     if (ts->adjoint_steps >= ts->adjoint_max_steps) ts->reason = TS_CONVERGED_ITS;
1514:   }
1515:   return(0);
1516: }

1518: /*@
1519:    TSAdjointSolve - Solves the discrete adjoint problem for an ODE/DAE

1521:    Collective on TS

1523:    Input Parameter:
1524: .  ts - the TS context obtained from TSCreate()

1526:    Options Database:
1527: . -ts_adjoint_view_solution <viewerinfo> - views the first gradient with respect to the initial values

1529:    Level: intermediate

1531:    Notes:
1532:    This must be called after a call to TSSolve() that solves the forward problem

1534:    By default this will integrate back to the initial time, one can use TSAdjointSetSteps() to step back to a later time

1536: .seealso: TSCreate(), TSSetCostGradients(), TSSetSolution(), TSAdjointStep()
1537: @*/
1538: PetscErrorCode TSAdjointSolve(TS ts)
1539: {
1540:   PetscErrorCode    ierr;

1544:   TSAdjointSetUp(ts);

1546:   /* reset time step and iteration counters */
1547:   ts->adjoint_steps     = 0;
1548:   ts->ksp_its           = 0;
1549:   ts->snes_its          = 0;
1550:   ts->num_snes_failures = 0;
1551:   ts->reject            = 0;
1552:   ts->reason            = TS_CONVERGED_ITERATING;

1554:   if (!ts->adjoint_max_steps) ts->adjoint_max_steps = ts->steps;
1555:   if (ts->adjoint_steps >= ts->adjoint_max_steps) ts->reason = TS_CONVERGED_ITS;

1557:   while (!ts->reason) {
1558:     TSTrajectoryGet(ts->trajectory,ts,ts->steps,&ts->ptime);
1559:     TSAdjointMonitor(ts,ts->steps,ts->ptime,ts->vec_sol,ts->numcost,ts->vecs_sensi,ts->vecs_sensip);
1560:     TSAdjointEventHandler(ts);
1561:     TSAdjointStep(ts);
1562:     if ((ts->vec_costintegral || ts->quadraturets) && !ts->costintegralfwd) {
1563:       TSAdjointCostIntegral(ts);
1564:     }
1565:   }
1566:   TSTrajectoryGet(ts->trajectory,ts,ts->steps,&ts->ptime);
1567:   TSAdjointMonitor(ts,ts->steps,ts->ptime,ts->vec_sol,ts->numcost,ts->vecs_sensi,ts->vecs_sensip);
1568:   ts->solvetime = ts->ptime;
1569:   TSTrajectoryViewFromOptions(ts->trajectory,NULL,"-ts_trajectory_view");
1570:   VecViewFromOptions(ts->vecs_sensi[0],(PetscObject) ts, "-ts_adjoint_view_solution");
1571:   ts->adjoint_max_steps = 0;
1572:   return(0);
1573: }
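/*
   Workflow sketch (not part of tssen.c; guarded out): the typical call sequence around
   TSAdjointSolve() for first-order sensitivities, assuming the Jacobians
   (TSSetRHSJacobian()/TSSetRHSJacobianP() or their implicit counterparts) and the cost
   gradients were set up beforehand.
*/
#if 0
static PetscErrorCode SolveForwardAndAdjoint(TS ts,Vec U0)
{
  PetscInt       numcost;
  Vec            *lambda,*mu;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TSSetSaveTrajectory(ts);CHKERRQ(ierr);   /* the adjoint run replays the stored trajectory */
  ierr = TSSolve(ts,U0);CHKERRQ(ierr);            /* forward run */
  ierr = TSAdjointSolve(ts);CHKERRQ(ierr);        /* backward run */
  ierr = TSGetCostGradients(ts,&numcost,&lambda,&mu);CHKERRQ(ierr);
  /* lambda[i] and mu[i] now hold d(cost_i)/d(initial conditions) and d(cost_i)/d(parameters) */
  PetscFunctionReturn(0);
}
#endif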

1575: /*@C
1576:    TSAdjointMonitor - Runs all user-provided adjoint monitor routines set using TSAdjointMonitorSet()

1578:    Collective on TS

1580:    Input Parameters:
1581: +  ts - time stepping context obtained from TSCreate()
1582: .  step - step number that has just completed
1583: .  ptime - model time of the state
1584: .  u - state at the current model time
1585: .  numcost - number of cost functions (dimension of lambda  or mu)
1586: .  lambda - vectors containing the gradients of the cost functions with respect to the ODE/DAE solution variables
1587: -  mu - vectors containing the gradients of the cost functions with respect to the problem parameters

1589:    Notes:
1590:    TSAdjointMonitor() is typically used automatically within the time stepping implementations.
1591:    Users would almost never call this routine directly.

1593:    Level: developer

1595: @*/
1596: PetscErrorCode TSAdjointMonitor(TS ts,PetscInt step,PetscReal ptime,Vec u,PetscInt numcost,Vec *lambda, Vec *mu)
1597: {
1599:   PetscInt       i,n = ts->numberadjointmonitors;

1604:   VecLockReadPush(u);
1605:   for (i=0; i<n; i++) {
1606:     (*ts->adjointmonitor[i])(ts,step,ptime,u,numcost,lambda,mu,ts->adjointmonitorcontext[i]);
1607:   }
1608:   VecLockReadPop(u);
1609:   return(0);
1610: }

1612: /*@
1613:  TSAdjointCostIntegral - Evaluate the cost integral in the adjoint run.

1615:  Collective on TS

1617:  Input Arguments:
1618:  .  ts - time stepping context

1620:  Level: advanced

1622:  Notes:
1623:  This function cannot be called until TSAdjointStep() has been completed.

1625:  .seealso: TSAdjointSolve(), TSAdjointStep()
1626:  @*/
1627: PetscErrorCode TSAdjointCostIntegral(TS ts)
1628: {
1631:     if (!ts->ops->adjointintegral) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"%s does not provide integral evaluation in the adjoint run",((PetscObject)ts)->type_name);
1632:     (*ts->ops->adjointintegral)(ts);
1633:     return(0);
1634: }

1636: /* ------------------ Forward (tangent linear) sensitivity  ------------------*/

1638: /*@
1639:   TSForwardSetUp - Sets up the internal data structures for the later use
1640:   of forward sensitivity analysis

1642:   Collective on TS

1644:   Input Parameter:
1645: . ts - the TS context obtained from TSCreate()

1647:   Level: advanced

1649: .seealso: TSCreate(), TSDestroy(), TSSetUp()
1650: @*/
1651: PetscErrorCode TSForwardSetUp(TS ts)
1652: {

1657:   if (ts->forwardsetupcalled) return(0);
1658:   if (ts->ops->forwardsetup) {
1659:     (*ts->ops->forwardsetup)(ts);
1660:   }
1661:   VecDuplicate(ts->vec_sol,&ts->vec_sensip_col);
1662:   ts->forwardsetupcalled = PETSC_TRUE;
1663:   return(0);
1664: }

1666: /*@
1667:   TSForwardReset - Reset the internal data structures used by forward sensitivity analysis

1669:   Collective on TS

1671:   Input Parameter:
1672: . ts - the TS context obtained from TSCreate()

1674:   Level: advanced

1676: .seealso: TSCreate(), TSDestroy(), TSForwardSetUp()
1677: @*/
1678: PetscErrorCode TSForwardReset(TS ts)
1679: {
1680:   TS             quadts = ts->quadraturets;

1685:   if (ts->ops->forwardreset) {
1686:     (*ts->ops->forwardreset)(ts);
1687:   }
1688:   MatDestroy(&ts->mat_sensip);
1689:   if (quadts) {
1690:     MatDestroy(&quadts->mat_sensip);
1691:   }
1692:   VecDestroy(&ts->vec_sensip_col);
1693:   ts->forward_solve      = PETSC_FALSE;
1694:   ts->forwardsetupcalled = PETSC_FALSE;
1695:   return(0);
1696: }

1698: /*@
1699:   TSForwardSetIntegralGradients - Set the vectors holding forward sensitivities of the integral term.

1701:   Input Parameters:
1702: + ts - the TS context obtained from TSCreate()
1703: . numfwdint - number of integrals
1704: - vp - the vectors containing the gradients for each integral w.r.t. parameters

1706:   Level: deprecated

1708: .seealso: TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1709: @*/
1710: PetscErrorCode TSForwardSetIntegralGradients(TS ts,PetscInt numfwdint,Vec *vp)
1711: {
1714:   if (ts->numcost && ts->numcost!=numfwdint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_USER,"The number of integrals is inconsistent with the number of cost functions (2nd parameter of TSSetCostIntegrand())");
1715:   if (!ts->numcost) ts->numcost = numfwdint;

1717:   ts->vecs_integral_sensip = vp;
1718:   return(0);
1719: }

1721: /*@
1722:   TSForwardGetIntegralGradients - Returns the forward sensitivities of the integral term.

1724:   Input Parameter:
1725: . ts - the TS context obtained from TSCreate()

1727:   Output Parameters:
1728: + numfwdint - number of integrals
1729: - vp - the vectors containing the gradients for each integral w.r.t. parameters

1730:   Level: deprecated

1732: .seealso: TSForwardSetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1733: @*/
1734: PetscErrorCode TSForwardGetIntegralGradients(TS ts,PetscInt *numfwdint,Vec **vp)
1735: {
1739:   if (numfwdint) *numfwdint = ts->numcost;
1740:   if (vp) *vp = ts->vecs_integral_sensip;
1741:   return(0);
1742: }

1744: /*@
1745:   TSForwardStep - Compute the forward sensitivity for one time step.

1747:   Collective on TS

1749:   Input Arguments:
1750: . ts - time stepping context

1752:   Level: advanced

1754:   Notes:
1755:   This function cannot be called until TSStep() has been completed.

1757: .seealso: TSForwardSetSensitivities(), TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardSetUp()
1758: @*/
1759: PetscErrorCode TSForwardStep(TS ts)
1760: {
1763:   if (!ts->ops->forwardstep) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"%s does not provide forward sensitivity analysis",((PetscObject)ts)->type_name);
1764:   PetscLogEventBegin(TS_ForwardStep,ts,0,0,0);
1765:   (*ts->ops->forwardstep)(ts);
1766:   PetscLogEventEnd(TS_ForwardStep,ts,0,0,0);
1767:   return(0);
1768: }

1770: /*@
1771:   TSForwardSetSensitivities - Sets the initial value of the trajectory sensitivities of the solution w.r.t. the problem parameters and initial values.

1773:   Logically Collective on TS

1775:   Input Parameters:
1776: + ts - the TS context obtained from TSCreate()
1777: . nump - number of parameters
1778: - Smat - sensitivities with respect to the parameters; the number of columns of this matrix equals the number of parameters

1780:   Level: beginner

1782:   Notes:
1783:   Forward sensitivity is also called 'trajectory sensitivity' in some fields such as power systems.
1784:   This function turns on a flag to trigger TSSolve() to compute forward sensitivities automatically.
1785:   You must call this function before TSSolve().
1786:   The entries in the sensitivity matrix must be correctly initialized with the values S = dy/dp at the starting time.

1788: .seealso: TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1789: @*/
1790: PetscErrorCode TSForwardSetSensitivities(TS ts,PetscInt nump,Mat Smat)
1791: {

1797:   ts->forward_solve  = PETSC_TRUE;
1798:   if (nump == PETSC_DEFAULT) {
1799:     MatGetSize(Smat,NULL,&ts->num_parameters);
1800:   } else ts->num_parameters = nump;
1801:   PetscObjectReference((PetscObject)Smat);
1802:   MatDestroy(&ts->mat_sensip);
1803:   ts->mat_sensip = Smat;
1804:   return(0);
1805: }
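
   A minimal sketch of the intended calling sequence, assuming placeholder sizes n (states) and np (parameters), a solution vector U, and an initial condition that does not depend on the parameters:

      Mat Sp;

      MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,np,NULL,&Sp);
      MatAssemblyBegin(Sp,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(Sp,MAT_FINAL_ASSEMBLY);
      MatZeroEntries(Sp);                       /* dy/dp = 0 at the initial time in this sketch */
      TSForwardSetSensitivities(ts,np,Sp);      /* must be called before TSSolve() */
      TSSolve(ts,U);                            /* Sp now holds dy/dp at the final time */

   After the forward run the same matrix can be retrieved with TSForwardGetSensitivities().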

1807: /*@
1808:   TSForwardGetSensitivities - Returns the trajectory sensitivities

1810:   Not Collective, but the Mat returned is parallel if the TS is parallel

1812:   Input Parameter:
1813: . ts - the TS context obtained from TSCreate()

1814:   Output Parameters:
1815: + nump - number of parameters
1816: - Smat - sensitivities with respect to the parameters; the number of columns of this matrix equals the number of parameters

1817:   Level: intermediate

1819: .seealso: TSForwardSetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1820: @*/
1821: PetscErrorCode TSForwardGetSensitivities(TS ts,PetscInt *nump,Mat *Smat)
1822: {
1825:   if (nump) *nump = ts->num_parameters;
1826:   if (Smat) *Smat = ts->mat_sensip;
1827:   return(0);
1828: }

1830: /*@
1831:    TSForwardCostIntegral - Evaluate the cost integral in the forward run.

1833:    Collective on TS

1835:    Input Arguments:
1836: .  ts - time stepping context

1838:    Level: advanced

1840:    Notes:
1841:    This function cannot be called until TSStep() has been completed.

1843: .seealso: TSSolve(), TSAdjointCostIntegral()
1844: @*/
1845: PetscErrorCode TSForwardCostIntegral(TS ts)
1846: {
1849:   if (!ts->ops->forwardintegral) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"%s does not provide integral evaluation in the forward run",((PetscObject)ts)->type_name);
1850:   (*ts->ops->forwardintegral)(ts);
1851:   return(0);
1852: }

1854: /*@
1855:   TSForwardSetInitialSensitivities - Set initial values for tangent linear sensitivities

1857:   Collective on TS

1859:   Input Parameters:
1860: + ts - the TS context obtained from TSCreate()
1861: - didp - parametric sensitivities of the initial condition

1863:   Level: intermediate

1865:   Notes: TSSolve() allows users to pass the initial solution directly to TS, but the tangent linear variables cannot be initialized this way. This function is used to set initial values for the tangent linear variables.

1867: .seealso: TSForwardSetSensitivities()
1868: @*/
1869: PetscErrorCode TSForwardSetInitialSensitivities(TS ts,Mat didp)
1870: {

1875:   if (!ts->mat_sensip) {
1876:     TSForwardSetSensitivities(ts,PETSC_DEFAULT,didp);
1877:   }
1878:   return(0);
1879: }
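
   If the initial condition does depend on the parameters, a sketch of the setup (again with placeholder sizes n and np) is:

      Mat didp;

      MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,np,NULL,&didp);
      /* ... fill column j of didp with dy(0)/dp_j, e.g. via MatSetValues() ... */
      MatAssemblyBegin(didp,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(didp,MAT_FINAL_ASSEMBLY);
      TSForwardSetInitialSensitivities(ts,didp);   /* before TSSolve() */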

1881: /*@
1882:    TSForwardGetStages - Get the number of stages and the tangent linear sensitivities at the intermediate stages

1884:    Input Parameter:
1885: .  ts - the TS context obtained from TSCreate()

1887:    Output Parameters:
1888: +  ns - number of stages
1889: -  S - tangent linear sensitivities at the intermediate stages

1891:    Level: advanced

1893: @*/
1894: PetscErrorCode TSForwardGetStages(TS ts,PetscInt *ns,Mat **S)
1895: {


1901:   if (!ts->ops->forwardgetstages) *S = NULL; /* this TS type does not provide stage sensitivities */
1902:   else {
1903:     (*ts->ops->forwardgetstages)(ts,ns,S);
1904:   }
1905:   return(0);
1906: }

1908: /*@
1909:    TSCreateQuadratureTS - Create a sub-TS that evaluates integrals over time

1911:    Input Parameters:
1912: +  ts - the TS context obtained from TSCreate()
1913: -  fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run

1915:    Output Parameter:
1916: .  quadts - the child TS context

1918:    Level: intermediate

1920: .seealso: TSGetQuadratureTS()
1921: @*/
1922: PetscErrorCode TSCreateQuadratureTS(TS ts,PetscBool fwd,TS *quadts)
1923: {
1924:   char prefix[128];

1930:   TSDestroy(&ts->quadraturets);
1931:   TSCreate(PetscObjectComm((PetscObject)ts),&ts->quadraturets);
1932:   PetscObjectIncrementTabLevel((PetscObject)ts->quadraturets,(PetscObject)ts,1);
1933:   PetscLogObjectParent((PetscObject)ts,(PetscObject)ts->quadraturets);
1934:   PetscSNPrintf(prefix,sizeof(prefix),"%squad_",((PetscObject)ts)->prefix ? ((PetscObject)ts)->prefix : "");
1935:   TSSetOptionsPrefix(ts->quadraturets,prefix);
1936:   *quadts = ts->quadraturets;

1938:   if (ts->numcost) {
1939:     VecCreateSeq(PETSC_COMM_SELF,ts->numcost,&(*quadts)->vec_sol);
1940:   } else {
1941:     VecCreateSeq(PETSC_COMM_SELF,1,&(*quadts)->vec_sol);
1942:   }
1943:   ts->costintegralfwd = fwd;
1944:   return(0);
1945: }
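
   A sketch of attaching an integrand through the quadrature sub-TS; CostIntegrand and user are placeholder user-side names:

      TS quadts;

      TSCreateQuadratureTS(ts,PETSC_TRUE,&quadts);          /* integrate the cost during the forward run */
      TSSetRHSFunction(quadts,NULL,CostIntegrand,&user);    /* evaluates the integrand r(u,p,t) */

   For sensitivity runs the Jacobians of the integrand are typically attached to quadts as well, e.g. with TSSetRHSJacobian() and TSSetRHSJacobianP().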

1947: /*@
1948:    TSGetQuadratureTS - Return the sub-TS that evaluates integrals over time

1950:    Input Parameter:
1951: .  ts - the TS context obtained from TSCreate()

1953:    Output Parameters:
1954: +  fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run
1955: -  quadts - the child TS context

1957:    Level: intermediate

1959: .seealso: TSCreateQuadratureTS()
1960: @*/
1961: PetscErrorCode TSGetQuadratureTS(TS ts,PetscBool *fwd,TS *quadts)
1962: {
1965:   if (fwd) *fwd = ts->costintegralfwd;
1966:   if (quadts) *quadts = ts->quadraturets;
1967:   return(0);
1968: }