/*
  almm.c — PETSc 3.15.0 (2021-04-05) source listing.
  Captured from the PETSc online source browser ("Report Typos and Errors");
  the browser strips error-checking macros and prefixes each line with its
  original line number.
*/
  1: #include <../src/tao/constrained/impls/almm/almm.h>
  2: #include <petsctao.h>
  3: #include <petsc/private/petscimpl.h>
  4: #include <petsc/private/vecimpl.h>

  6: static PetscErrorCode TaoALMMCombinePrimal_Private(Tao,Vec,Vec,Vec);
  7: static PetscErrorCode TaoALMMCombineDual_Private(Tao,Vec,Vec,Vec);
  8: static PetscErrorCode TaoALMMSplitPrimal_Private(Tao,Vec,Vec,Vec);
  9: static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao);
 10: static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao);
 11: static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao);

 /* TaoSolve_ALMM - outer loop of the augmented Lagrangian multiplier method.
    Each outer iteration minimizes the current augmented Lagrangian with the
    nested subsolver, then either updates the multiplier estimates (when the
    constraint norm satisfies ||C|| <= ytol) or increases the penalty mu. */
 13: static PetscErrorCode TaoSolve_ALMM(Tao tao)
 14: {
 15:   TAO_ALMM           *auglag = (TAO_ALMM*)tao->data;
 16:   TaoConvergedReason reason;
 17:   PetscReal          updated; /* 0.0/1.0 flag reported through TaoMonitor's step argument */
 18:   PetscErrorCode     ierr;

 21:   /* reset initial multiplier/slack guess */
 22:   if (!tao->recycle) {
 23:     if (tao->ineq_constrained) {
 24:       VecZeroEntries(auglag->Ps);
 25:       TaoALMMCombinePrimal_Private(tao, auglag->Px, auglag->Ps, auglag->P);
 26:       VecZeroEntries(auglag->Yi);
 27:     }
 28:     if (tao->eq_constrained) {
 29:       VecZeroEntries(auglag->Ye);
 30:     }
 31:   }

 33:   /* compute initial nonlinear Lagrangian and its derivatives */
 34:   (*auglag->sub_obj)(tao);
 35:   TaoALMMComputeOptimalityNorms_Private(tao);
 36:   /* print initial step and check convergence */
 37:   PetscInfo1(tao,"Solving with %s formulation\n",TaoALMMTypes[auglag->type]);
 38:   TaoLogConvergenceHistory(tao, auglag->Lval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
 39:   TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, 0.0);
 40:   (*tao->ops->convergencetest)(tao, tao->cnvP);
 41:   /* set initial penalty factor and inner solver tolerance */
 42:   switch (auglag->type) {
 43:     case (TAO_ALMM_CLASSIC):
 44:       auglag->mu = auglag->mu0;
 45:       break;
 46:     case (TAO_ALMM_PHR):
 47:       auglag->cenorm = 0.0;
 48:       if (tao->eq_constrained) {
 49:         VecDot(auglag->Ce, auglag->Ce, &auglag->cenorm);
 50:       }
 51:       auglag->cinorm = 0.0;
 52:       if (tao->ineq_constrained) {
         /* cinorm accumulates only the violated part: ||max(0, -Ci)||^2 */
 53:         VecCopy(auglag->Ci, auglag->Ciwork);
 54:         VecScale(auglag->Ciwork, -1.0);
 55:         VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
 56:         VecDot(auglag->Ciwork, auglag->Ciwork, &auglag->cinorm);
 57:       }
 58:       /* determine initial penalty factor based on the balance of constraint violation and objective function value */
       /* NOTE(review): divides by (cenorm + cinorm); an exactly feasible initial
          point would make this 0/0 -- confirm upstream guards against it */
 59:       auglag->mu = PetscMax(1.e-6, PetscMin(10.0, 2.0*PetscAbsReal(auglag->fval)/(auglag->cenorm + auglag->cinorm)));
 60:       break;
 61:     default:
 62:       break;
 63:   }
 64:   auglag->gtol = auglag->gtol0;
 65:   PetscInfo1(tao,"Initial penalty: %.2f\n",auglag->mu);

 67:   /* start aug-lag outer loop */
 68:   while (tao->reason == TAO_CONTINUE_ITERATING) {
 69:     ++tao->niter;
 70:     /* update subsolver tolerance */
 71:     PetscInfo1(tao,"Subsolver tolerance: ||G|| <= %e\n",auglag->gtol);
 72:     TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
 73:     /* solve the bound-constrained or unconstrained subproblem */
 74:     TaoSolve(auglag->subsolver);
 75:     TaoGetConvergedReason(auglag->subsolver, &reason);
 76:     tao->ksp_its += auglag->subsolver->ksp_its;
 77:     if (reason != TAO_CONVERGED_GATOL) {
       /* not fatal: the outer loop proceeds with the (possibly inexact) subsolver iterate */
 78:       PetscInfo1(tao,"Subsolver failed to converge, reason: %s\n",TaoConvergedReasons[reason]);
 79:     }
 80:     /* evaluate solution and test convergence */
 81:     (*auglag->sub_obj)(tao);
 82:     TaoALMMComputeOptimalityNorms_Private(tao);
 83:     /* decide whether to update multipliers or not */
 84:     updated = 0.0;
 85:     if (auglag->cnorm <= auglag->ytol) {
 86:       PetscInfo1(tao,"Multipliers updated: ||C|| <= %e\n",auglag->ytol);
 87:       /* constraints are good, update multipliers and convergence tolerances */
 88:       if (tao->eq_constrained) {
         /* first-order multiplier update, then clamp into [ye_min, ye_max] */
 89:         VecAXPY(auglag->Ye, auglag->mu, auglag->Ce);
 90:         VecSet(auglag->Cework, auglag->ye_max);
 91:         VecPointwiseMin(auglag->Ye, auglag->Cework, auglag->Ye);
 92:         VecSet(auglag->Cework, auglag->ye_min);
 93:         VecPointwiseMax(auglag->Ye, auglag->Cework, auglag->Ye);
 94:       }
 95:       if (tao->ineq_constrained) {
         /* same update/clamp for the inequality multipliers, bounds [yi_min, yi_max] */
 96:         VecAXPY(auglag->Yi, auglag->mu, auglag->Ci);
 97:         VecSet(auglag->Ciwork, auglag->yi_max);
 98:         VecPointwiseMin(auglag->Yi, auglag->Ciwork, auglag->Yi);
 99:         VecSet(auglag->Ciwork, auglag->yi_min);
100:         VecPointwiseMax(auglag->Yi, auglag->Ciwork, auglag->Yi);
101:       }
102:       /* tolerances are updated only for non-PHR methods */
103:       if (auglag->type != TAO_ALMM_PHR) {
104:         auglag->ytol = PetscMax(tao->catol, auglag->ytol/PetscPowReal(auglag->mu, auglag->mu_pow_good));
105:         auglag->gtol = PetscMax(tao->gatol, auglag->gtol/auglag->mu);
106:       }
107:       updated = 1.0;
108:     } else {
109:       /* constraints are bad, update penalty factor */
110:       auglag->mu = PetscMin(auglag->mu_max, auglag->mu_fac*auglag->mu);
111:       /* tolerances are reset only for non-PHR methods */
112:       if (auglag->type != TAO_ALMM_PHR) {
113:         auglag->ytol = PetscMax(tao->catol, 0.1/PetscPowReal(auglag->mu, auglag->mu_pow_bad));
114:         auglag->gtol = PetscMax(tao->gatol, 1.0/auglag->mu);
115:       }
116:       PetscInfo1(tao,"Penalty increased: mu = %.2f\n",auglag->mu);
117:     }
118:     TaoLogConvergenceHistory(tao, auglag->fval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
119:     TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, updated);
120:     (*tao->ops->convergencetest)(tao, tao->cnvP);
121:   }

123:   return(0);
124: }

126: static PetscErrorCode TaoView_ALMM(Tao tao,PetscViewer viewer)
127: {
128:   TAO_ALMM       *auglag = (TAO_ALMM*)tao->data;
129:   PetscBool      isascii;

133:   PetscViewerASCIIPushTab(viewer);
134:   TaoView(auglag->subsolver,viewer);
135:   PetscViewerASCIIPopTab(viewer);
136:   PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
137:   if (isascii) {
138:     PetscViewerASCIIPushTab(viewer);
139:     PetscViewerASCIIPrintf(viewer, "ALMM Formulation Type: %s\n", TaoALMMTypes[auglag->type]);
140:     PetscViewerASCIIPopTab(viewer);
141:   }
142:   return(0);
143: }

145: static PetscErrorCode TaoSetUp_ALMM(Tao tao)
146: {
147:   TAO_ALMM             *auglag = (TAO_ALMM*)tao->data;
148:   VecType              vec_type;
149:   Vec                  SL, SU;
150:   PetscBool            is_cg = PETSC_FALSE, is_lmvm = PETSC_FALSE;
151:   PetscErrorCode       ierr;

154:   if (tao->ineq_doublesided) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_WRONGSTATE, "TAOALMM does not support double-sided inequality constraint definition. Please restructure your inequality constrainst to fit the form c(x) >= 0.");
155:   if (!tao->eq_constrained && !tao->ineq_constrained) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ORDER, "Equality and/or inequality constraints must be defined before solver setup.");
156:   TaoComputeVariableBounds(tao);
157:   /* alias base vectors and create extras */
158:   VecGetType(tao->solution, &vec_type);
159:   auglag->Px = tao->solution;
160:   if (!tao->gradient) { /* base gradient */
161:     VecDuplicate(tao->solution, &tao->gradient);
162:   }
163:   auglag->LgradX = tao->gradient;
164:   if (!auglag->Xwork) { /* opt var work vector */
165:     VecDuplicate(tao->solution, &auglag->Xwork);
166:   }
167:   if (tao->eq_constrained) {
168:     auglag->Ce = tao->constraints_equality;
169:     auglag->Ae = tao->jacobian_equality;
170:     if (!auglag->Ye) { /* equality multipliers */
171:       VecDuplicate(auglag->Ce, &auglag->Ye);
172:     }
173:     if (!auglag->Cework) {
174:       VecDuplicate(auglag->Ce, &auglag->Cework);
175:     }
176:   }
177:   if (tao->ineq_constrained) {
178:     auglag->Ci = tao->constraints_inequality;
179:     auglag->Ai = tao->jacobian_inequality;
180:     if (!auglag->Yi) { /* inequality multipliers */
181:       VecDuplicate(auglag->Ci, &auglag->Yi);
182:     }
183:     if (!auglag->Ciwork) {
184:       VecDuplicate(auglag->Ci, &auglag->Ciwork);
185:     }
186:     if (!auglag->Cizero) {
187:       VecDuplicate(auglag->Ci, &auglag->Cizero);
188:       VecZeroEntries(auglag->Cizero);
189:     }
190:     if (!auglag->Ps) { /* slack vars */
191:       VecDuplicate(auglag->Ci, &auglag->Ps);
192:     }
193:     if (!auglag->LgradS) { /* slack component of Lagrangian gradient */
194:       VecDuplicate(auglag->Ci, &auglag->LgradS);
195:     }
196:     /* create vector for combined primal space and the associated communication objects */
197:     if (!auglag->P) {
198:       PetscMalloc1(2, &auglag->Parr);
199:       auglag->Parr[0] = auglag->Px; auglag->Parr[1] = auglag->Ps;
200:       VecConcatenate(2, auglag->Parr, &auglag->P, &auglag->Pis);
201:       PetscMalloc1(2, &auglag->Pscatter);
202:       VecScatterCreate(auglag->P, auglag->Pis[0], auglag->Px, NULL, &auglag->Pscatter[0]);
203:       VecScatterCreate(auglag->P, auglag->Pis[1], auglag->Ps, NULL, &auglag->Pscatter[1]);
204:     }
205:     if (tao->eq_constrained) {
206:       /* create vector for combined dual space and the associated communication objects */
207:       if (!auglag->Y) {
208:         PetscMalloc1(2, &auglag->Yarr);
209:         auglag->Yarr[0] = auglag->Ye; auglag->Yarr[1] = auglag->Yi;
210:         VecConcatenate(2, auglag->Yarr, &auglag->Y, &auglag->Yis);
211:         PetscMalloc1(2, &auglag->Yscatter);
212:         VecScatterCreate(auglag->Y, auglag->Yis[0], auglag->Ye, NULL, &auglag->Yscatter[0]);
213:         VecScatterCreate(auglag->Y, auglag->Yis[1], auglag->Yi, NULL, &auglag->Yscatter[1]);
214:       }
215:       if (!auglag->C) {
216:         VecDuplicate(auglag->Y, &auglag->C);
217:       }
218:     } else {
219:       if (!auglag->C) {
220:         auglag->C = auglag->Ci;
221:       }
222:       if (!auglag->Y) {
223:         auglag->Y = auglag->Yi;
224:       }
225:     }
226:   } else {
227:     if (!auglag->P) {
228:       auglag->P = auglag->Px;
229:     }
230:     if (!auglag->G) {
231:       auglag->G = auglag->LgradX;
232:     }
233:     if (!auglag->C) {
234:       auglag->C = auglag->Ce;
235:     }
236:     if (!auglag->Y) {
237:       auglag->Y = auglag->Ye;
238:     }
239:   }
240:   /* initialize parameters */
241:   if (auglag->type == TAO_ALMM_PHR) {
242:     auglag->mu_fac = 10.0;
243:     auglag->yi_min = 0.0;
244:     auglag->ytol0 = 0.5;
245:     auglag->gtol0 = tao->gatol;
246:     if (tao->gatol_changed && tao->catol_changed) {
247:       PetscInfo(tao,"TAOALMM with PHR: different gradient and constraint tolerances are not supported, setting catol = gatol\n");
248:       tao->catol = tao->gatol;
249:     }
250:   }
251:   /* set the Lagrangian formulation type for the subsolver */
252:   switch (auglag->type) {
253:     case (TAO_ALMM_CLASSIC):
254:       auglag->sub_obj = TaoALMMComputeAugLagAndGradient_Private;
255:       break;
256:     case (TAO_ALMM_PHR):
257:       auglag->sub_obj = TaoALMMComputePHRLagAndGradient_Private;
258:       break;
259:     default:
260:       break;
261:   }
262:   /* set up the subsolver */
263:   TaoSetInitialVector(auglag->subsolver, auglag->P);
264:   TaoSetObjectiveAndGradientRoutine(auglag->subsolver, TaoALMMSubsolverObjectiveAndGradient_Private, (void*)auglag);
265:   if (tao->bounded) {
266:     /* make sure that the subsolver is a bound-constrained method */
267:     PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOCG, &is_cg);
268:     PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOLMVM, &is_lmvm);
269:     if (is_cg) {
270:       TaoSetType(auglag->subsolver, TAOBNCG);
271:       PetscInfo(tao,"TAOCG detected for bound-constrained problem, switching to TAOBNCG instead.");
272:     }
273:     if (is_lmvm) {
274:       TaoSetType(auglag->subsolver, TAOBQNLS);
275:       PetscInfo(tao,"TAOLMVM detected for bound-constrained problem, switching to TAOBQNLS instead.");
276:     }
277:     /* create lower and upper bound clone vectors for subsolver */
278:     if (!auglag->PL) {
279:       VecDuplicate(auglag->P, &auglag->PL);
280:     }
281:     if (!auglag->PU) {
282:       VecDuplicate(auglag->P, &auglag->PU);
283:     }
284:     if (tao->ineq_constrained) {
285:       /* create lower and upper bounds for slack, set lower to 0 */
286:       VecDuplicate(auglag->Ci, &SL);
287:       VecSet(SL, 0.0);
288:       VecDuplicate(auglag->Ci, &SU);
289:       VecSet(SU, PETSC_INFINITY);
290:       /* combine opt var bounds with slack bounds */
291:       TaoALMMCombinePrimal_Private(tao, tao->XL, SL, auglag->PL);
292:       TaoALMMCombinePrimal_Private(tao, tao->XU, SU, auglag->PU);
293:       /* destroy work vectors */
294:       VecDestroy(&SL);
295:       VecDestroy(&SU);
296:     } else {
297:       /* no inequality constraints, just copy bounds into the subsolver */
298:       VecCopy(tao->XL, auglag->PL);
299:       VecCopy(tao->XU, auglag->PU);
300:     }
301:     TaoSetVariableBounds(auglag->subsolver, auglag->PL, auglag->PU);
302:   }
303:   TaoSetUp(auglag->subsolver);
304:   auglag->G = auglag->subsolver->gradient;

306:   return(0);
307: }

 /* TaoDestroy_ALMM - tears down the ALMM solver context created in
    TaoCreate_ALMM/TaoSetUp_ALMM. Work vectors, scatters, and index sets are
    only destroyed when setup actually ran (tao->setupcalled); the destruction
    mirrors the constraint-dependent allocation structure of TaoSetUp_ALMM. */
309: static PetscErrorCode TaoDestroy_ALMM(Tao tao)
310: {
311:   TAO_ALMM       *auglag = (TAO_ALMM*)tao->data;

315:   TaoDestroy(&auglag->subsolver);
316:   if (tao->setupcalled) {
317:     VecDestroy(&auglag->Xwork);              /* opt work */
318:     if (tao->eq_constrained) {
319:       VecDestroy(&auglag->Ye);               /* equality multipliers */
320:       VecDestroy(&auglag->Cework);           /* equality work vector */
321:     }
322:     if (tao->ineq_constrained) {
323:       VecDestroy(&auglag->Ps);               /* slack vars */
324:       auglag->Parr[0] = NULL;                                     /* clear pointer to tao->solution, will be destroyed by TaoDestroy() shell */
325:       PetscFree(auglag->Parr);               /* array of primal vectors */
326:       VecDestroy(&auglag->LgradS);           /* slack grad */
327:       VecDestroy(&auglag->Cizero);           /* zero vector for pointwise max */
328:       VecDestroy(&auglag->Yi);               /* inequality multipliers */
329:       VecDestroy(&auglag->Ciwork);           /* inequality work vector */
330:       VecDestroy(&auglag->P);                /* combo primal */
331:       ISDestroy(&auglag->Pis[0]);            /* index set for X inside P */
332:       ISDestroy(&auglag->Pis[1]);            /* index set for S inside P */
333:       PetscFree(auglag->Pis);                /* array of P index sets */
334:       VecScatterDestroy(&auglag->Pscatter[0]);
335:       VecScatterDestroy(&auglag->Pscatter[1]);
336:       PetscFree(auglag->Pscatter);
337:       if (tao->eq_constrained) {
       /* Y/C are separate vectors only when both constraint types exist (see TaoSetUp_ALMM) */
338:         VecDestroy(&auglag->Y);              /* combo multipliers */
339:         PetscFree(auglag->Yarr);             /* array of dual vectors */
340:         VecDestroy(&auglag->C);              /* combo constraints */
341:         ISDestroy(&auglag->Yis[0]);          /* index set for Ye inside Y */
342:         ISDestroy(&auglag->Yis[1]);          /* index set for Yi inside Y */
343:         PetscFree(auglag->Yis);
344:         VecScatterDestroy(&auglag->Yscatter[0]);
345:         VecScatterDestroy(&auglag->Yscatter[1]);
346:         PetscFree(auglag->Yscatter);
347:       }
     /* NOTE(review): in the single-constraint-type cases Y/C/G merely alias
        vectors destroyed above or owned elsewhere, so they are not destroyed
        here -- confirm no dangling use after this point */
348:     }
349:     if (tao->bounded) {
350:       VecDestroy(&auglag->PL);                /* lower bounds for subsolver */
351:       VecDestroy(&auglag->PU);                /* upper bounds for subsolver */
352:     }
353:   }
354:   PetscFree(tao->data);
355:   return(0);
356: }

358: static PetscErrorCode TaoSetFromOptions_ALMM(PetscOptionItems *PetscOptionsObject,Tao tao)
359: {
360:   TAO_ALMM     *auglag = (TAO_ALMM*)tao->data;
361:   PetscInt       i;
362:   PetscBool      compatible;

366:   PetscOptionsHead(PetscOptionsObject,"Augmented Lagrangian multipler method solves problems with general constraints by converting them into a sequence of unconstrained problems.");
367:   PetscOptionsReal("-tao_almm_mu_init","initial penalty parameter","",auglag->mu0,&auglag->mu0,NULL);
368:   PetscOptionsReal("-tao_almm_mu_factor","increase factor for the penalty parameter","",auglag->mu_fac,&auglag->mu_fac,NULL);
369:   PetscOptionsReal("-tao_almm_mu_power_good","exponential for penalty parameter when multiplier update is accepted","",auglag->mu_pow_good,&auglag->mu_pow_good,NULL);
370:   PetscOptionsReal("-tao_almm_mu_power_bad","exponential for penalty parameter when multiplier update is rejected","",auglag->mu_pow_bad,&auglag->mu_pow_bad,NULL);
371:   PetscOptionsReal("-tao_almm_mu_max","maximum safeguard for penalty parameter updates","",auglag->mu_max,&auglag->mu_max,NULL);
372:   PetscOptionsReal("-tao_almm_ye_min","minimum safeguard for equality multiplier updates","",auglag->ye_min,&auglag->ye_min,NULL);
373:   PetscOptionsReal("-tao_almm_ye_max","maximum safeguard for equality multipliers updates","",auglag->ye_max,&auglag->ye_max,NULL);
374:   PetscOptionsReal("-tao_almm_yi_min","minimum safeguard for inequality multipliers updates","",auglag->yi_min,&auglag->yi_min,NULL);
375:   PetscOptionsReal("-tao_almm_yi_max","maximum safeguard for inequality multipliers updates","",auglag->yi_max,&auglag->yi_max,NULL);
376:   PetscOptionsEnum("-tao_almm_type","augmented Lagrangian formulation type for the subproblem","TaoALMMType",TaoALMMTypes,(PetscEnum)auglag->type,(PetscEnum*)&auglag->type,NULL);
377:   PetscOptionsTail();
378:   TaoSetFromOptions(auglag->subsolver);
379:   PetscObjectTypeCompareAny((PetscObject)(auglag->subsolver), &compatible, TAOCG, TAOLMVM, TAOBNCG, TAOBQNLS, TAOBQNKLS, TAOBQNKTR, TAOBQNKTL, "");
380:   if (!compatible) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "Subsolver must be a first-order method (TAOCG, TABNCG, TAOLMVM, TAOBQN-family");
381:   for (i=0; i<tao->numbermonitors; i++) {
382:     PetscObjectReference((PetscObject)tao->monitorcontext[i]);
383:     TaoSetMonitor(auglag->subsolver, tao->monitor[i], tao->monitorcontext[i], tao->monitordestroy[i]);
384:     if (tao->monitor[i] == TaoMonitorDefault || tao->monitor[i] == TaoDefaultCMonitor || tao->monitor[i] == TaoDefaultGMonitor || tao->monitor[i] == TaoDefaultSMonitor) {
385:       auglag->info = PETSC_TRUE;
386:     }
387:   }
388:   return(0);
389: }

391: /* -------------------------------------------------------- */

393: /*MC
394:   TaoALMM - Augmented Lagrangian multiplier method for solving nonlinear optimization problems with general constraints.

396:   Options Database Keys:
397: + -tao_almm_mu_init <real>              - initial penalty parameter (default: 10.)
398: . -tao_almm_mu_factor <real>            - increase factor for the penalty parameter (default: 100.)
399: . -tao_almm_mu_max <real>               - maximum safeguard for penalty parameter updates (default: 1.e20)
400: . -tao_almm_mu_power_good <real>        - exponential for penalty parameter when multiplier update is accepted (default: 0.9)
401: . -tao_almm_mu_power_bad <real>         - exponential for penalty parameter when multiplier update is rejected (default: 0.1)
402: . -tao_almm_ye_min <real>               - minimum safeguard for equality multiplier updates (default: -1.e20)
403: . -tao_almm_ye_max <real>               - maximum safeguard for equality multiplier updates (default: 1.e20)
404: . -tao_almm_yi_min <real>               - minimum safeguard for inequality multiplier updates (default: -1.e20)
405: . -tao_almm_yi_max <real>               - maximum safeguard for inequality multiplier updates (default: 1.e20)
406: - -tao_almm_type <classic,phr>          - change formulation of the augmented Lagrangian merit function for the subproblem (default: classic)

408:   Level: beginner

410:   Notes:
411:   This method converts a constrained problem into a sequence of unconstrained problems via the augmented
412:   Lagrangian merit function. Bound constraints are pushed down to the subproblem without any modifications.

414:   Two formulations are offered for the subproblem: canonical Hestenes-Powell augmented Lagrangian with slack
415:   variables for inequality constraints, and a slack-less Powell-Hestenes-Rockafellar (PHR) formulation utilizing a
416:   pointwise max() penalty on inequality constraints. The canonical augmented Lagrangian formulation typically
417:   converges faster for most problems. However, PHR may be desirable for problems featuring a large number
418:   of inequality constraints because it avoids inflating the size of the subproblem with slack variables.

420:   The subproblem is solved using a nested first-order TAO solver. The user can retrieve a pointer to
421:   the subsolver via TaoALMMGetSubsolver() or pass command line arguments to it using the
422:   "-tao_almm_subsolver_" prefix. Currently, TaoALMM does not support second-order methods for the
423:   subproblem. It is also highly recommended that the subsolver chosen by the user utilize a trust-region
424:   strategy for globalization (default: TAOBQNKTR) especially if the outer problem features bound constraints.

426: .vb
427:   while unconverged
428:     solve argmin_x L(x) s.t. l <= x <= u
429:     if ||c|| <= y_tol
430:       if ||c|| <= c_tol && ||Lgrad|| <= g_tol:
431:         problem converged, return solution
432:       else
433:         constraints sufficiently improved
434:         update multipliers and tighten tolerances
435:       endif
436:     else
437:       constraints did not improve
438:       update penalty and loosen tolerances
439:     endif
440:   endwhile
441: .ve

443: .seealso: TaoALMMGetType(), TaoALMMSetType(), TaoALMMSetSubsolver(), TaoALMMGetSubsolver(),
444:           TaoALMMGetMultipliers(), TaoALMMSetMultipliers(), TaoALMMGetPrimalIS(), TaoALMMGetDualIS()
445: M*/
 /* TaoCreate_ALMM - constructor registered for the TAOALMM type. Allocates the
    solver context, installs the method table, sets default tolerances and
    algorithm parameters, creates the nested subsolver (default TAOBQNKTR with
    prefix "tao_almm_subsolver_"), and composes the type-specific API hooks. */
446: PETSC_EXTERN PetscErrorCode TaoCreate_ALMM(Tao tao)
447: {
448:   TAO_ALMM         *auglag;
449:   PetscErrorCode   ierr;

452:   PetscNewLog(tao, &auglag);

454:   tao->ops->destroy        = TaoDestroy_ALMM;
455:   tao->ops->setup          = TaoSetUp_ALMM;
456:   tao->ops->setfromoptions = TaoSetFromOptions_ALMM;
457:   tao->ops->view           = TaoView_ALMM;
458:   tao->ops->solve          = TaoSolve_ALMM;

     /* default outer convergence tolerances */
460:   tao->gatol = 1.e-5;
461:   tao->grtol = 0.0;
462:   tao->gttol = 0.0;
463:   tao->catol = 1.e-5;
464:   tao->crtol = 0.0;

     /* default algorithm parameters (match the option defaults documented above) */
466:   tao->data           = (void*)auglag;
467:   auglag->parent      = tao;
468:   auglag->mu0         = 10.0;
469:   auglag->mu          = auglag->mu0;
470:   auglag->mu_fac      = 10.0;
471:   auglag->mu_max      = PETSC_INFINITY;
472:   auglag->mu_pow_good = 0.9;
473:   auglag->mu_pow_bad  = 0.1;
474:   auglag->ye_min      = PETSC_NINFINITY;
475:   auglag->ye_max      = PETSC_INFINITY;
476:   auglag->yi_min      = PETSC_NINFINITY;
477:   auglag->yi_max      = PETSC_INFINITY;
478:   auglag->ytol0       = 0.1/PetscPowReal(auglag->mu0, auglag->mu_pow_bad);
479:   auglag->ytol        = auglag->ytol0;
480:   auglag->gtol0       = 1.0/auglag->mu0;
481:   auglag->gtol        = auglag->gtol0;

483:   auglag->sub_obj     = TaoALMMComputeAugLagAndGradient_Private;
484:   auglag->type        = TAO_ALMM_CLASSIC;
485:   auglag->info        = PETSC_FALSE;

     /* nested first-order subsolver; its type/tolerances may be overridden via options */
487:   TaoCreate(PetscObjectComm((PetscObject)tao),&auglag->subsolver);
488:   TaoSetType(auglag->subsolver, TAOBQNKTR);
489:   TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
490:   TaoSetMaximumIterations(auglag->subsolver, 1000);
491:   TaoSetMaximumFunctionEvaluations(auglag->subsolver, 10000);
492:   TaoSetFunctionLowerBound(auglag->subsolver, PETSC_NINFINITY);
493:   TaoSetOptionsPrefix(auglag->subsolver,"tao_almm_subsolver_");
494:   PetscObjectIncrementTabLevel((PetscObject)auglag->subsolver,(PetscObject)tao,1);

     /* compose the hooks behind the public TaoALMMGet/Set... API */
496:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetType_C", TaoALMMGetType_Private);
497:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetType_C", TaoALMMSetType_Private);
498:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetSubsolver_C", TaoALMMGetSubsolver_Private);
499:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetSubsolver_C", TaoALMMSetSubsolver_Private);
500:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetMultipliers_C", TaoALMMGetMultipliers_Private);
501:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetMultipliers_C", TaoALMMSetMultipliers_Private);
502:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetPrimalIS_C", TaoALMMGetPrimalIS_Private);
503:   PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetDualIS_C", TaoALMMGetDualIS_Private);
504:   return(0);
505: }

507: static PetscErrorCode TaoALMMCombinePrimal_Private(Tao tao, Vec X, Vec S, Vec P)
508: {
509:   TAO_ALMM     *auglag = (TAO_ALMM*)tao->data;

513:   if (tao->ineq_constrained) {
514:     VecScatterBegin(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
515:     VecScatterEnd(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
516:     VecScatterBegin(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
517:     VecScatterEnd(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
518:   } else {
519:     VecCopy(X, P);
520:   }
521:   return(0);
522: }

524: static PetscErrorCode TaoALMMCombineDual_Private(Tao tao, Vec EQ, Vec IN, Vec Y)
525: {
526:   TAO_ALMM     *auglag = (TAO_ALMM*)tao->data;

530:   if (tao->eq_constrained) {
531:     if (tao->ineq_constrained) {
532:       VecScatterBegin(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
533:       VecScatterEnd(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
534:       VecScatterBegin(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
535:       VecScatterEnd(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
536:     } else {
537:       VecCopy(EQ, Y);
538:     }
539:   } else {
540:     VecCopy(IN, Y);
541:   }
542:   return(0);
543: }

545: static PetscErrorCode TaoALMMSplitPrimal_Private(Tao tao, Vec P, Vec X, Vec S)
546: {
547:   TAO_ALMM     *auglag = (TAO_ALMM*)tao->data;

551:   if (tao->ineq_constrained) {
552:     VecScatterBegin(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
553:     VecScatterEnd(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
554:     VecScatterBegin(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
555:     VecScatterEnd(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
556:   } else {
557:     VecCopy(P, X);
558:   }
559:   return(0);
560: }

 562: /* this assumes that the latest constraints are stored in Ce and Ci, and also combined in C */
 /* Computes the optimality norms used by the outer loop: gnorm (projected
    Lagrangian gradient) and cnorm (constraint violation). PHR uses infinity
    norms and a dynamically scaled multiplier-update tolerance ytol; the
    classic formulation uses 2-norms of the combined quantities. */
 563: static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao tao)
 564: {
 565:   TAO_ALMM     *auglag = (TAO_ALMM*)tao->data;

 569:   /* if bounded, project the gradient */
 570:   if (tao->bounded) {
 571:     VecBoundGradientProjection(auglag->LgradX, auglag->Px, tao->XL, tao->XU, auglag->LgradX);
 572:   }
 573:   if (auglag->type == TAO_ALMM_PHR) {
 574:     VecNorm(auglag->LgradX, NORM_INFINITY, &auglag->gnorm);
 575:     auglag->cenorm = 0.0;
 576:     if (tao->eq_constrained) {
 577:       VecNorm(auglag->Ce, NORM_INFINITY, &auglag->cenorm);
 578:     }
 579:     auglag->cinorm = 0.0;
 580:     if (tao->ineq_constrained) {
       /* cinorm = || max(Ci, -Yi/mu) ||_inf */
 581:       VecCopy(auglag->Yi, auglag->Ciwork);
 582:       VecScale(auglag->Ciwork, -1.0/auglag->mu);
 583:       VecPointwiseMax(auglag->Ciwork, auglag->Ci, auglag->Ciwork);
 584:       VecNorm(auglag->Ciwork, NORM_INFINITY, &auglag->cinorm);
 585:     }
     /* PHR rescales ytol each evaluation against the previous violation */
 586:     auglag->cnorm_old = auglag->cnorm;
 587:     auglag->cnorm = PetscMax(auglag->cenorm, auglag->cinorm);
 588:     auglag->ytol = auglag->ytol0 * auglag->cnorm_old;
 589:   } else {
 590:     VecNorm(auglag->LgradX, NORM_2, &auglag->gnorm);
 591:     VecNorm(auglag->C, NORM_2, &auglag->cnorm);
 592:   }
 593:   return(0);
 594: }

 /* Evaluates the objective, gradient, constraints, and Jacobians at the
    current combined iterate, applying the formulation-specific transform to
    the inequality data (slack shift for CLASSIC, sign flip for PHR).
    NOTE(review): the P argument is ignored; the routine always splits
    auglag->P (all callers pass auglag->P) -- confirm before relying on P. */
 596: static PetscErrorCode TaoALMMEvaluateIterate_Private(Tao tao, Vec P)
 597: {
 598:   TAO_ALMM       *auglag = (TAO_ALMM*)tao->data;

 602:   /* split solution into primal and slack components */
 603:   TaoALMMSplitPrimal_Private(tao, auglag->P, auglag->Px, auglag->Ps);

 605:   /* compute f, df/dx and the constraints */
 606:   TaoComputeObjectiveAndGradient(tao, auglag->Px, &auglag->fval, auglag->LgradX);
 607:   if (tao->eq_constrained) {
 608:     TaoComputeEqualityConstraints(tao, auglag->Px, auglag->Ce);
 609:     TaoComputeJacobianEquality(tao, auglag->Px, auglag->Ae, auglag->Ae);
 610:   }
 611:   if (tao->ineq_constrained) {
 612:     TaoComputeInequalityConstraints(tao, auglag->Px, auglag->Ci);
 613:     TaoComputeJacobianInequality(tao, auglag->Px, auglag->Ai, auglag->Ai);
 614:     switch (auglag->type) {
 615:       case (TAO_ALMM_CLASSIC):
 616:         /* classic formulation converts inequality to equality constraints via slack variables */
       /* after this, Ci holds Ci - S */
 617:         VecAXPY(auglag->Ci, -1.0, auglag->Ps);
 618:         break;
 619:       case (TAO_ALMM_PHR):
 620:         /* PHR is based on Ci <= 0 while TAO defines Ci >= 0 so we hit it with a negative sign */
 621:         VecScale(auglag->Ci, -1.0);
 622:         MatScale(auglag->Ai, -1.0);
 623:         break;
 624:       default:
 625:         break;
 626:     }
 627:   }
 628:   /* combine constraints into one vector */
 629:   TaoALMMCombineDual_Private(tao, auglag->Ce, auglag->Ci, auglag->C);
 630:   return(0);
 631: }

 633: /*
 634: Lphr = f + 0.5*mu*[ (Ce + Ye/mu)^T (Ce + Ye/mu) + pmin(0, Ci + Yi/mu)^T pmin(0, Ci + Yi/mu)]

 636: dLphr/dX = dF/dX + mu*[ (Ce + Ye/mu)^T Ae + pmin(0, Ci + Yi/mu)^T Ai]

 638: dLphr/dS = 0

 NOTE(review): the code below uses pmax(0, Ci + Yi/mu); Ci and Ai already
 carry the PHR sign flip applied in TaoALMMEvaluateIterate_Private, which
 presumably accounts for the pmin/pmax discrepancy -- confirm.
 639: */
 /* Evaluates the PHR augmented Lagrangian and its gradient at the current
    iterate, caching the results in auglag->Lval and auglag->G. */
 640: static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao tao)
 641: {
 642:   TAO_ALMM       *auglag = (TAO_ALMM*)tao->data;
 643:   PetscReal      eq_norm=0.0, ineq_norm=0.0;

 647:   TaoALMMEvaluateIterate_Private(tao, auglag->P);
 648:   if (tao->eq_constrained) {
 649:     /* Ce_work = mu*(Ce + Ye/mu) */
 650:     VecWAXPY(auglag->Cework, 1.0/auglag->mu, auglag->Ye, auglag->Ce);
 651:     VecDot(auglag->Cework, auglag->Cework, &eq_norm); /* contribution to scalar Lagrangian */
 652:     VecScale(auglag->Cework, auglag->mu);
 653:     /* dL/dX += mu*(Ce + Ye/mu)^T Ae */
 654:     MatMultTransposeAdd(auglag->Ae, auglag->Cework, auglag->LgradX, auglag->LgradX);
 655:   }
 656:   if (tao->ineq_constrained) {
 657:     /* Ci_work = mu * pmax(0, Ci + Yi/mu) where pmax() is pointwise max() */
 658:     VecWAXPY(auglag->Ciwork, 1.0/auglag->mu, auglag->Yi, auglag->Ci);
 659:     VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
 660:     VecDot(auglag->Ciwork, auglag->Ciwork, &ineq_norm); /* contribution to scalar Lagrangian */
 661:     /* dL/dX += mu * pmax(0, Ci + Yi/mu)^T Ai */
 662:     VecScale(auglag->Ciwork, auglag->mu);
 663:     MatMultTransposeAdd(auglag->Ai, auglag->Ciwork, auglag->LgradX, auglag->LgradX);
 664:     /* dL/dS = 0 because there are no slacks in PHR */
 665:     VecZeroEntries(auglag->LgradS);
 666:   }
 667:   /* combine gradient together */
 668:   TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
 669:   /* compute L = f + 0.5 * mu * [(Ce + Ye/mu)^T (Ce + Ye/mu) + pmax(0, Ci + Yi/mu)^T pmax(0, Ci + Yi/mu)] */
 670:   auglag->Lval = auglag->fval + 0.5*auglag->mu*(eq_norm + ineq_norm);
 671:   return(0);
 672: }

 674: /*
 675: Lc = F + Ye^TCe + Yi^T(Ci - S) + 0.5*mu*[Ce^TCe + (Ci - S)^T(Ci - S)]

 677: dLc/dX = dF/dX + Ye^TAe + Yi^TAi + mu*[Ce^TAe + (Ci - S)^TAi]

 679: dLc/dS = -[Yi + mu*(Ci - S)]
 680: */
 /* Evaluates the classic (Hestenes-Powell) augmented Lagrangian and its
    gradient, caching results in auglag->Lval and auglag->G. Note: after
    TaoALMMEvaluateIterate_Private, Ci already holds Ci - S, so every use of
    Ci below is the slack-shifted constraint. */
 681: static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao tao)
 682: {
 683:   TAO_ALMM       *auglag = (TAO_ALMM*)tao->data;
 684:   PetscReal      yeTce=0.0, yiTcims=0.0, ceTce=0.0, cimsTcims=0.0;

 688:   TaoALMMEvaluateIterate_Private(tao, auglag->P);
 689:   if (tao->eq_constrained) {
 690:     /* compute scalar contributions */
 691:     VecDot(auglag->Ye, auglag->Ce, &yeTce);
 692:     VecDot(auglag->Ce, auglag->Ce, &ceTce);
 693:     /* dL/dX += ye^T Ae */
 694:     MatMultTransposeAdd(auglag->Ae, auglag->Ye, auglag->LgradX, auglag->LgradX);
 695:     /* dL/dX += mu * ce^T Ae */
 696:     MatMultTranspose(auglag->Ae, auglag->Ce, auglag->Xwork);
 697:     VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
 698:   }
 699:   if (tao->ineq_constrained) {
 700:     /* compute scalar contributions */
 701:     VecDot(auglag->Yi, auglag->Ci, &yiTcims);
 702:     VecDot(auglag->Ci, auglag->Ci, &cimsTcims);
 703:     /* dL/dX += yi^T Ai */
 704:     MatMultTransposeAdd(auglag->Ai, auglag->Yi, auglag->LgradX, auglag->LgradX);
 705:     /* dL/dX += mu * (ci - s)^T Ai */
 706:     MatMultTranspose(auglag->Ai, auglag->Ci, auglag->Xwork);
 707:     VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
 708:     /* dL/dS = -[yi + mu*(ci - s)] */
 709:     VecWAXPY(auglag->LgradS, auglag->mu, auglag->Ci, auglag->Yi);
 710:     VecScale(auglag->LgradS, -1.0);
 711:   }
 712:   /* combine gradient together */
 713:   TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
 714:   /* compute L = f + ye^T ce + yi^T (ci - s) + 0.5*mu*||ce||^2 + 0.5*mu*||ci - s||^2 */
 715:   auglag->Lval = auglag->fval + yeTce + yiTcims + 0.5*auglag->mu*(ceTce + cimsTcims);
 716:   return(0);
 717: }

719: PetscErrorCode TaoALMMSubsolverObjectiveAndGradient_Private(Tao tao, Vec P, PetscReal *Lval, Vec G, void *ctx)
720: {
721:   TAO_ALMM     *auglag = (TAO_ALMM*)ctx;

725:   VecCopy(P, auglag->P);
726:   (*auglag->sub_obj)(auglag->parent);
727:   VecCopy(auglag->G, G);
728:   *Lval = auglag->Lval;
729:   return(0);
730: }