Merge lp:~libadjoint/dolfin-adjoint/improved-optimisation into lp:dolfin-adjoint

Proposed by Simon Funke
Status: Merged
Merged at revision: 408
Proposed branch: lp:~libadjoint/dolfin-adjoint/improved-optimisation
Merge into: lp:dolfin-adjoint
Diff against target: 833 lines (+413/-136)
14 files modified
dolfin_adjoint/__init__.py (+2/-1)
dolfin_adjoint/optimization.py (+199/-94)
dolfin_adjoint/options.py (+1/-1)
dolfin_adjoint/parameter.py (+10/-0)
dolfin_adjoint/reduced_functional.py (+79/-0)
dolfin_adjoint/solving.py (+2/-0)
dolfin_adjoint/ui.py (+2/-0)
dolfin_adjoint/utils.py (+20/-9)
tests/burgers_oo/burgers_oo.py (+4/-3)
tests/optimal_control_mms/optimal_control_mms.py (+11/-6)
tests/optimization/optimization.py (+22/-12)
tests/optimization_scalar/optimization_scalar.py (+6/-9)
tests/reduced_functional_evaluation/reduced_functional_evaluation.py (+54/-0)
tests/test.py (+1/-1)
To merge this branch: bzr merge lp:~libadjoint/dolfin-adjoint/improved-optimisation
Reviewer: David Ham, Status: Pending
Review via email: mp+119247@code.launchpad.net

Description of the change

This branch implements several improvements to the optimisation interface.

Solving an optimisation problem is now as simple as:

>> minimize(reduced_functional)

where reduced_functional is an implementation of the reduced functional (i.e. reduced_functional(m) must return the functional value for the parameter choice m).

I also wrote a new class ReducedFunctional that implements such a reduced functional based on the current annotation of the forward model (and using the new functional implementation, cheers for that).
An example of how to use it is:
>> reduced_functional = ReducedFunctional(J, InitialConditionParameter(m))

The functional value for a new parameter set can then be evaluated with (make sure that the forward model has been annotated before calling this):
>> func_value = reduced_functional(m)
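
Putting the pieces together, a typical run then looks roughly like this (a sketch based on the updated optimal_control_mms test in this branch; solve_pde, u, V, u_d and m stand for your own forward solver, state, function space, desired state and control):

>> # Annotate the forward model once with the initial control guess
>> solve_pde(u, V, m)
>> # Build the reduced functional from the annotation
>> J = Functional(inner(u - u_d, u - u_d)*dx*dt[FINISH_TIME])
>> reduced_func = ReducedFunctional(J, InitialConditionParameter(m))
>> # Minimise it; the optimal control is returned
>> m_opt = minimize(reduced_func, algorithm = 'scipy.l_bfgs_b', bounds = (-1, 1))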

I also added interfaces to new optimisation algorithms: conjugate gradient, BFGS, and the truncated Newton method.
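
The algorithm is chosen with the algorithm keyword, and print_optimization_algorithms() lists the available choices. For example (extra keyword arguments are passed straight through to the underlying scipy routine, so this is only an illustration):

>> print_optimization_algorithms()
>> m_opt = minimize(reduced_func, algorithm = 'scipy.tnc', bounds = (-1, 1))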

All tests pass, so it might be worth giving it a try.

356. By Simon Funke

- optimisation -> optimization (follow the standard spelling in programming)
- bugfix for the scipy.slsqp method, where the parameter full_output = True caused the optimisation module to crash
- new "scale" parameter which can be used to rescale the problem

357. By Simon Funke

- Add a maximization function
- Minor fixes

358. By Simon Funke

Minimize/maximize now return the optimal control
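
Together with the maximize function from the previous revision, this means the optimising control can be obtained directly, e.g.:

>> m_opt = minimize(reduced_func)
>> m_opt = maximize(reduced_func)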

359. By David Ham

merge from trunk

Preview Diff

1=== modified file 'dolfin_adjoint/__init__.py'
2--- dolfin_adjoint/__init__.py 2012-07-13 18:23:30 +0000
3+++ dolfin_adjoint/__init__.py 2012-08-29 10:56:40 +0000
4@@ -26,5 +26,6 @@
5 import gst
6 import function
7 import genericmatrix
8-import optimisation
9+import optimization
10+import reduced_functional
11 from ui import *
12
13=== renamed file 'dolfin_adjoint/optimisation.py' => 'dolfin_adjoint/optimization.py'
14--- dolfin_adjoint/optimisation.py 2012-07-03 15:16:10 +0000
15+++ dolfin_adjoint/optimization.py 2012-08-29 10:56:40 +0000
16@@ -1,57 +1,86 @@
17-from dolfin import *
18-from dolfin_adjoint import *
19+import dolfin
20+from dolfin import cpp, MPI, info_red, info
21+from dolfin_adjoint import constant, utils
22+from reduced_functional import ReducedFunctional
23 import numpy
24 import sys
25
26-def get_global(m):
27- ''' Takes a distributed object and returns a numpy array that contains all global values '''
28- if type(m) == float:
29- return numpy.array(m)
30- if type(m) == constant.Constant:
31- a = numpy.zeros(m.value_size())
32- p = numpy.zeros(m.value_size())
33- m.eval(a, p)
34- return a
35- elif type(m) in (function.Function, functions.function.Function):
36- m_v = m.vector()
37- m_a = cpp.DoubleArray(m.vector().size())
38- try:
39- m.vector().gather(m_a, numpy.arange(m_v.size(), dtype='I'))
40- return numpy.array(m_a.array())
41- except TypeError:
42- m_a = m.vector().gather(numpy.arange(m_v.size(), dtype='I'))
43- return m_a
44- else:
45- raise TypeError, 'Unknown parameter type %s.' % str(type(m))
46-
47-def set_local(m, m_global_array):
48- ''' Sets the local values of the distrbuted object m to the values contained in the global array m_global_array '''
49- if type(m) == constant.Constant:
50- if m.rank() == 0:
51- m.assign(m_global_array[0])
52- else:
53- m.assign(Constant(tuple(m_global_array)))
54- elif type(m) in (function.Function, functions.function.Function):
55- range_begin, range_end = m.vector().local_range()
56- m_a_local = m_global_array[range_begin:range_end]
57- m.vector().set_local(m_a_local)
58- m.vector().apply('insert')
59- else:
60- raise TypeError, 'Unknown parameter type'
61+def get_global(m_list):
62+ ''' Takes a (optional a list of) distributed object(s) and returns one numpy array containing their global values '''
63+ if not isinstance(m_list, (list, tuple)):
64+ m_list = [m_list]
65+
66+ m_global = []
67+ for m in m_list:
68+ # Parameters of type float
69+ if m == None or type(m) == float:
70+ m_global.append(m)
71+ # Parameters of type Constant
72+ elif type(m) == constant.Constant:
73+ a = numpy.zeros(m.value_size())
74+ p = numpy.zeros(m.value_size())
75+ m.eval(a, p)
76+ m_global += a.tolist()
77+ # Function parameters of type Function
78+ elif hasattr(m, "vector"):
79+ m_v = m.vector()
80+ m_a = cpp.DoubleArray(m.vector().size())
81+ try:
82+ m.vector().gather(m_a, numpy.arange(m_v.size(), dtype='I'))
83+ m_global += m_a.array().tolist()
84+ except TypeError:
85+ m_a = m.vector().gather(numpy.arange(m_v.size(), dtype='I'))
86+ m_global += m_a.tolist()
87+ else:
88+ raise TypeError, 'Unknown parameter type %s.' % str(type(m))
89+
90+ return numpy.array(m_global, dtype='d')
91+
92+def set_local(m_list, m_global_array):
93+ ''' Sets the local values of a (or optionally a list of) distributed object(s) to the values contained in the global array m_global_array '''
94+
95+ if not isinstance(m_list, (list, tuple)):
96+ m_list = [m_list]
97+
98+ offset = 0
99+ for m in m_list:
100+ # Parameters of type dolfin.Constant
101+ if type(m) == constant.Constant:
102+ m.assign(constant.Constant(numpy.reshape(m_global_array[offset:offset+m.value_size()], m.shape())))
103+ offset += m.value_size()
104+ # Function parameters of type dolfin.Function
105+ elif hasattr(m, "vector"):
106+ range_begin, range_end = m.vector().local_range()
107+ m_a_local = m_global_array[offset + range_begin:offset + range_end]
108+ m.vector().set_local(m_a_local)
109+ m.vector().apply('insert')
110+ offset += m.vector().size()
111+ else:
112+ raise TypeError, 'Unknown parameter type'
113
114 def serialise_bounds(bounds, m):
115- ''' Converts bounds to an array of tuples and serialises it in a parallel environment. '''
116-
117- bounds_arr = []
118+ ''' Converts bounds to an array of (min, max) tuples and serialises it in a parallel environment. '''
119+
120+ # Convert the bounds into the canoncial array form [ [lower_bound1, lower_bound2, ... ], [upper_bound1, upper_bound2, ...] ]
121+ if len(numpy.array(bounds).shape) == 1:
122+ bounds = numpy.array([[b] for b in bounds])
123+
124+ if len(bounds) != 2:
125+ raise ValueError, "The 'bounds' parameter must be of the form [lower_bound, upper_bound] for one parameter or [ [lower_bound1, lower_bound2, ...], [upper_bound1, upper_bound2, ...] ] for multiple parameters."
126+
127+ bounds_arr = [[], []]
128 for i in range(2):
129- if type(bounds[i]) == int or type(bounds[i]) == float:
130- bounds_arr.append(bounds[i]*numpy.ones(m.vector().size()))
131- else:
132- bounds_arr.append(get_global(bounds[i]))
133-
134+ for j in range(len(bounds[i])):
135+ if type(bounds[i][j]) in [int, float, numpy.int32, numpy.int64]:
136+ bounds_arr[i] += (bounds[i][j]*numpy.ones(m[j].vector().size())).tolist()
137+ else:
138+ bounds_arr[i] += get_global(bounds[i][j]).tolist()
139+
140+ # Transpose and return the array to get the form [ [lower_bound1, upper_bound1], [lower_bound2, upper_bound2], ... ]
141 return numpy.array(bounds_arr).T
142
143-def minimise_scipy_slsqp(J, dJ, m, bounds = None, **kwargs):
144+def minimize_scipy_slsqp(J, dJ, m, bounds = None, **kwargs):
145+ ''' Interface to the SQP algorithm in scipy '''
146 from scipy.optimize import fmin_slsqp
147
148 m_global = get_global(m)
149@@ -65,9 +94,13 @@
150 mopt = fmin_slsqp(J, m_global, fprime = dJ, bounds = bounds, **kwargs)
151 else:
152 mopt = fmin_slsqp(J, m_global, fprime = dJ, **kwargs)
153- set_local(m, mopt)
154+ if type(mopt) == list:
155+ mopt = mopt[0]
156+ set_local(m, numpy.array(mopt))
157+ return m
158
159-def minimise_scipy_fmin_l_bfgs_b(J, dJ, m, bounds = None, **kwargs):
160+def minimize_scipy_fmin_l_bfgs_b(J, dJ, m, bounds = None, **kwargs):
161+ ''' Interface to the L-BFGS-B algorithm in scipy '''
162 from scipy.optimize import fmin_l_bfgs_b
163
164 m_global = get_global(m)
165@@ -81,72 +114,144 @@
166
167 mopt, f, d = fmin_l_bfgs_b(J, m_global, fprime = dJ, bounds = bounds, **kwargs)
168 set_local(m, mopt)
169-
170-optimisation_algorithms_dict = {'scipy.l_bfgs_b': ('The L-BFGS-B implementation in scipy.', minimise_scipy_fmin_l_bfgs_b),
171- 'scipy.slsqp': ('The SLSQP implementation in scipy.', minimise_scipy_slsqp) }
172-
173-def print_optimisation_algorithms():
174- ''' Prints the available optimisation algorithms '''
175-
176- print 'Available optimisation algorithms:'
177- for function_name, (description, func) in optimisation_algorithms_dict.iteritems():
178+ return m
179+
180+def minimize_scipy_tnc(J, dJ, m, bounds = None, **kwargs):
181+ from scipy.optimize import fmin_tnc
182+
183+ m_global = get_global(m)
184+
185+ # Shut up all processors but the first one.
186+ if MPI.process_number() != 0:
187+ kwargs['iprint'] = -1
188+
189+ if bounds:
190+ bounds = serialise_bounds(bounds, m)
191+
192+ mopt, nfeval, rc = fmin_tnc(J, m_global, fprime = dJ, bounds = bounds, **kwargs)
193+ set_local(m, mopt)
194+ return m
195+
196+def minimize_scipy_cg(J, dJ, m, **kwargs):
197+ from scipy.optimize import fmin_cg
198+
199+ m_global = get_global(m)
200+
201+ # Shut up all processors but the first one.
202+ if MPI.process_number() != 0:
203+ kwargs['iprint'] = -1
204+
205+ mopt, fopt, func_calls, grad_calls, warnflag, allvecs = fmin_cg(J, m_global, fprime = dJ, **kwargs)
206+ set_local(m, mopt)
207+ return m
208+
209+def minimize_scipy_bfgs(J, dJ, m, **kwargs):
210+ from scipy.optimize import fmin_bfgs
211+
212+ m_global = get_global(m)
213+
214+ # Shut up all processors but the first one.
215+ if MPI.process_number() != 0:
216+ kwargs['iprint'] = -1
217+
218+ mopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag, allvecs = fmin_bfgs(J, m_global, fprime = dJ, **kwargs)
219+ set_local(m, mopt)
220+ return m
221+
222+optimization_algorithms_dict = {'scipy.l_bfgs_b': ('The L-BFGS-B implementation in scipy.', minimize_scipy_fmin_l_bfgs_b),
223+ 'scipy.slsqp': ('The SLSQP implementation in scipy.', minimize_scipy_slsqp),
224+ 'scipy.tnc': ('The truncated Newton algorithm implemented in scipy.', minimize_scipy_tnc),
225+ 'scipy.cg': ('The nonlinear conjugate gradient algorithm implemented in scipy.', minimize_scipy_cg),
226+ 'scipy.bfgs': ('The BFGS implementation in scipy.', minimize_scipy_bfgs),
227+ }
228+
229+def print_optimization_algorithms():
230+ ''' Prints the available optimization algorithms '''
231+
232+ print 'Available optimization algorithms:'
233+ for function_name, (description, func) in optimization_algorithms_dict.iteritems():
234 print function_name, ': ', description
235
236-def minimise(reduced_functional, functional, parameter, m, algorithm, **kwargs):
237+def minimize(reduced_func, algorithm = 'scipy.l_bfgs_b', scale = 1.0, **kwargs):
238 ''' Solves the minimisation problem with PDE constraint:
239
240- min_m functional(u, m)
241+ min_m func(u, m)
242 s.t.
243 e(u, m) = 0
244 lb <= m <= ub
245 g(m) <= u
246
247- where m is the control variable, u is the solution of the PDE system e(u, m) = 0, functional is the functional of interest and lb, ub and g(m) constraints the control variables.
248- The optimisation problem is solved using a gradient based optimisation algorithm and the functional gradients are computed by solving the associated adjoint system.
249+ where m is the control variable, u is the solution of the PDE system e(u, m) = 0, func is the functional of interest and lb, ub and g(m) constraints the control variables.
250+ The optimization problem is solved using a gradient based optimization algorithm and the functional gradients are computed by solving the associated adjoint system.
251
252- The functional arguments are as follows:
253- * 'reduced_functional' must be a python function the implements the reduced functional (i.e. functional(u(m), m)). That is, it takes m as a parameter, solves the model and returns the functional value.
254- * 'functional' must be a dolfin_adjoint.functional object describing the functional of interest
255- * 'parameter' must be a dolfin_adjoint.parameter that is to be minimised
256- * 'm' must contain the control values. The optimisation algorithm uses these values as a initial guess and updates them after each optimisation iteration. The optimal control values can be accessed by reading 'm' after calling minimise.
257- * 'bounds' is an optional keyword parameter to support control constraints: bounds = (lb, ub). lb and ub can either be floats to enforce a global bound or a dolfin.Function to define a varying bound.
258- * 'algorithm' specifies the optimistation algorithm to be used to solve the problem. The available algorithms can be listed with the print_optimisation_algorithms function.
259+ The function arguments are as follows:
260+ * 'reduced_func' must be a ReducedFunctional object.
261+ * 'algorithm' specifies the optimization algorithm to be used to solve the problem. The available algorithms can be listed with the print_optimization_algorithms function.
262+ * 'scale' is a factor to scale to problem. Use a negative number to solve a maximisation problem.
263+ * 'bounds' is an optional keyword parameter to support control constraints: bounds = (lb, ub). lb and ub must be of the same type than the parameters m.
264
265- Additional arguments specific for the optimisation algorithms can be added to the minimise functions (e.g. iprint = 2). These arguments will be passed to the underlying optimisation algorithm. For detailed information about which arguments are supported for each optimisation algorithm, please refer to the documentaton of the optimisation algorithm.
266+ Additional arguments specific for the optimization algorithms can be added to the minimize functions (e.g. iprint = 2). These arguments will be passed to the underlying optimization algorithm. For detailed information about which arguments are supported for each optimization algorithm, please refer to the documentaton of the optimization algorithm.
267 '''
268- def dJ_array(m_array):
269+
270+ def reduced_func_deriv_array(m_array):
271+ ''' An implementation of the reduced functional derivative that accepts the parameter as an array '''
272
273 # In the case that the parameter values have changed since the last forward run,
274- # we need to rerun the forward model with the new parameters
275+ # we first need to rerun the forward model with the new parameters to have the
276+ # correct forward solutions
277+ m = [p.data() for p in reduced_func.parameter]
278 if (m_array != get_global(m)).any():
279- reduced_functional_array(m_array)
280+ reduced_func_array(m_array)
281
282- dJdm = utils.compute_gradient(functional, parameter)
283+ dJdm = utils.compute_gradient(reduced_func.functional, reduced_func.parameter)
284 dJdm_global = get_global(dJdm)
285
286- if dolfin.parameters["optimisation"]["test_gradient"]:
287- minconv = utils.test_gradient_array(reduced_functional_array, dJdm_global, m_array,
288- seed = dolfin.parameters["optimisation"]["test_gradient_seed"])
289+ # Perform the gradient test
290+ if dolfin.parameters["optimization"]["test_gradient"]:
291+ minconv = utils.test_gradient_array(reduced_func_array, scale * dJdm_global, m_array,
292+ seed = dolfin.parameters["optimization"]["test_gradient_seed"])
293 if minconv < 1.9:
294 raise RuntimeWarning, "A gradient test failed during execution."
295 else:
296 info("Gradient test succesfull.")
297- reduced_functional_array(m_array)
298-
299- return dJdm_global
300-
301- def reduced_functional_array(m_array):
302-
303- # Reset any prior annotation of the adjointer as we are about to rerun the forward model.
304- solving.adj_reset()
305- # If functional is a FinalFunctinal, we need to set the activated flag to False
306- if hasattr(functional, 'activated'):
307- functional.activated = False
308-
309+ reduced_func_array(m_array)
310+
311+ return scale * dJdm_global
312+
313+ def reduced_func_array(m_array):
314+ ''' An implementation of the reduced functional that accepts the parameter as an array '''
315+ # In case the annotation is not reused, we need to reset any prior annotation of the adjointer before reruning the forward model.
316+ if not reduced_func.replays_annotation:
317+ solving.adj_reset()
318+
319+ # Set the parameter values and execute the reduced functional
320+ m = [p.data() for p in reduced_func.parameter]
321 set_local(m, m_array)
322- return reduced_functional(m)
323-
324- if algorithm not in optimisation_algorithms_dict.keys():
325- raise ValueError, 'Unknown optimisation algorithm ' + algorithm + '. Use the print_optimisation_algorithms to get a list of the available algorithms.'
326-
327- optimisation_algorithms_dict[algorithm][1](reduced_functional_array, dJ_array, m, **kwargs)
328+ return scale * reduced_func(m)
329+
330+ if algorithm not in optimization_algorithms_dict.keys():
331+ raise ValueError, 'Unknown optimization algorithm ' + algorithm + '. Use the print_optimization_algorithms to get a list of the available algorithms.'
332+
333+ return optimization_algorithms_dict[algorithm][1](reduced_func_array, reduced_func_deriv_array, [p.data() for p in reduced_func.parameter], **kwargs)
334+
335+def maximize(reduced_func, algorithm = 'scipy.l_bfgs_b', scale = 1.0, **kwargs):
336+ ''' Solves the maximisation problem with PDE constraint:
337+
338+ max_m func(u, m)
339+ s.t.
340+ e(u, m) = 0
341+ lb <= m <= ub
342+ g(m) <= u
343+
344+ where m is the control variable, u is the solution of the PDE system e(u, m) = 0, func is the functional of interest and lb, ub and g(m) constraints the control variables.
345+ The optimization problem is solved using a gradient based optimization algorithm and the functional gradients are computed by solving the associated adjoint system.
346+
347+ The function arguments are as follows:
348+ * 'reduced_func' must be a ReducedFunctional object.
349+ * 'algorithm' specifies the optimization algorithm to be used to solve the problem. The available algorithms can be listed with the print_optimization_algorithms function.
350+ * 'scale' is a factor to scale to problem. Use a negative number to solve a maximisation problem.
351+ * 'bounds' is an optional keyword parameter to support control constraints: bounds = (lb, ub). lb and ub must be of the same type than the parameters m.
352+
353+ Additional arguments specific for the optimization algorithms can be added to the minimize functions (e.g. iprint = 2). These arguments will be passed to the underlying optimization algorithm. For detailed information about which arguments are supported for each optimization algorithm, please refer to the documentaton of the optimization algorithm.
354+ '''
355+ return minimize(reduced_func, algorithm, scale = -scale, **kwargs)
356
357=== modified file 'dolfin_adjoint/options.py'
358--- dolfin_adjoint/options.py 2012-06-11 11:13:00 +0000
359+++ dolfin_adjoint/options.py 2012-08-29 10:56:40 +0000
360@@ -7,7 +7,7 @@
361 adj_params.add("fussy_replay", True)
362 adj_params.add("stop_annotating", False)
363
364-opt_params = Parameters("optimisation")
365+opt_params = Parameters("optimization")
366 opt_params.add("test_gradient", False)
367 opt_params.add("test_gradient_seed", 0.0001)
368
369
370=== modified file 'dolfin_adjoint/parameter.py'
371--- dolfin_adjoint/parameter.py 2012-07-30 20:44:01 +0000
372+++ dolfin_adjoint/parameter.py 2012-08-29 10:56:40 +0000
373@@ -23,6 +23,7 @@
374 '''coeff: the variable whose initial condition you wish to perturb.
375 perturbation: the perturbation direction in which you wish to compute the gradient. Must be a Function.'''
376
377+ self.coeff = coeff
378 self.var = None
379 # Find the first occurance of the coeffcient
380 for t in range(adjglobals.adjointer.timestep_count):
381@@ -58,6 +59,9 @@
382 else:
383 return None
384
385+ def data(self):
386+ return self.coeff
387+
388 class ScalarParameter(DolfinAdjointParameter):
389 '''This Parameter is used as input to the tangent linear model (TLM)
390 when one wishes to compute dJ/da, where a is a single scalar parameter.'''
391@@ -136,6 +140,9 @@
392 else:
393 return None
394
395+ def data(self):
396+ return self.a
397+
398 class ScalarParameters(DolfinAdjointParameter):
399 '''This Parameter is used as input to the tangent linear model (TLM)
400 when one wishes to compute dJ/dv . delta v, where v is a vector of scalar parameters.'''
401@@ -194,3 +201,6 @@
402 dJdv[i] = out
403
404 return dJdv
405+
406+ def data(self):
407+ return self.v
408
409=== added file 'dolfin_adjoint/reduced_functional.py'
410--- dolfin_adjoint/reduced_functional.py 1970-01-01 00:00:00 +0000
411+++ dolfin_adjoint/reduced_functional.py 2012-08-29 10:56:40 +0000
412@@ -0,0 +1,79 @@
413+import libadjoint
414+from dolfin_adjoint import adjlinalg, adjrhs, constant
415+from dolfin_adjoint.adjglobals import adjointer
416+
417+class DummyEquation(object):
418+ pass
419+
420+class ReducedFunctional(object):
421+ def __init__(self, functional, parameter):
422+ ''' Creates a reduced functional object, that evaluates the functional value for a given parameter value.
423+ The arguments are as follows:
424+ * 'functional' must be a dolfin_adjoint.Functional object.
425+ * 'parameter' must be a single or a list of dolfin_adjoint.DolfinAdjointParameter objects.
426+ '''
427+ self.functional = functional
428+ if not isinstance(parameter, (list, tuple)):
429+ parameter = [parameter]
430+ self.parameter = parameter
431+ # This flag indicates if the functional evaluation is based on replaying the forward annotation.
432+ self.replays_annotation = True
433+ self.eqns = []
434+
435+ def eval_callback(self, value):
436+ ''' This function is called before the reduced functional is evaluated.
437+ It is intended to be overwritten by the user, for example to plot the control values
438+ that are passed into the callback as "value". '''
439+ pass
440+
441+ def __call__(self, value):
442+ ''' Evaluates the reduced functional for the given parameter value, by replaying the forward model.
443+ Note: before using this evaluation, make sure that the forward model has been annotated. '''
444+
445+ self.eval_callback(value)
446+ if not isinstance(value, (list, tuple)):
447+ value = [value]
448+ if len(value) != len(self.parameter):
449+ raise ValueError, "The number of parameters must equal the number of parameter values."
450+
451+ # Update the parameter values
452+ for i in range(len(value)):
453+ if type(value[i]) == constant.Constant:
454+ # Constants are not duplicated in the annotation. That is, changing a constant that occurs
455+ # in the forward model will also change the forward replay with libadjoint.
456+ # However, this is not the case for functions...
457+ pass
458+ elif hasattr(value[i], 'vector'):
459+ # ... since these are duplicated and then occur as rhs in the annotation.
460+ # Therefore, we need to update the right hand side callbacks for
461+ # the equation that targets the associated variable.
462+
463+ # Create a RHS object with the new control values
464+ init_rhs = adjlinalg.Vector(value[i]).duplicate()
465+ init_rhs.axpy(1.0, adjlinalg.Vector(value[i]))
466+ rhs = adjrhs.RHS(init_rhs)
467+ # Register the new rhs in the annotation
468+ eqn = DummyEquation()
469+ eqn_nb = self.parameter[i].var.equation_nb(adjointer)
470+ eqn.equation = adjointer.adjointer.equations[eqn_nb]
471+ # Store the equation as a class variable in order to keep a python reference in the memory
472+ self.eqns.append(eqn)
473+ rhs.register(self.eqns[-1])
474+ else:
475+ raise NotImplementedError, "The ReducedFunctional class currently only works for parameters that are Functions"
476+
477+
478+ # Replay the annotation and evaluate the functional
479+ func_value = 0.
480+ for i in range(adjointer.equation_count):
481+ (fwd_var, output) = adjointer.get_forward_solution(i)
482+
483+ storage = libadjoint.MemoryStorage(output)
484+ storage.set_overwrite(True)
485+ adjointer.record_variable(fwd_var, storage)
486+ if i == adjointer.timestep_end_equation(fwd_var.timestep):
487+ func_value += adjointer.evaluate_functional(self.functional, fwd_var.timestep)
488+
489+ #adjglobals.adjointer.forget_forward_equation(i)
490+ return func_value
491+
492
493=== modified file 'dolfin_adjoint/solving.py'
494--- dolfin_adjoint/solving.py 2012-06-29 10:25:47 +0000
495+++ dolfin_adjoint/solving.py 2012-08-29 10:56:40 +0000
496@@ -118,6 +118,8 @@
497 # /before/ we map the coefficients -> dependencies,
498 # so that libadjoint records the dependencies with the right timestep number.
499 if not linear:
500+ # Register the initial condition before the first nonlinear solve
501+ register_initial_conditions([[u, adjglobals.adj_variables[u]],], linear=False)
502 var = adjglobals.adj_variables.next(u)
503 else:
504 var = None
505
506=== modified file 'dolfin_adjoint/ui.py'
507--- dolfin_adjoint/ui.py 2012-07-13 18:23:30 +0000
508+++ dolfin_adjoint/ui.py 2012-08-29 10:56:40 +0000
509@@ -17,3 +17,5 @@
510 from constant import Constant
511 from unimplemented import *
512 from timeforms import dt, TimeMeasure, START_TIME, FINISH_TIME
513+from reduced_functional import ReducedFunctional
514+from optimization import minimize, maximize, print_optimization_algorithms
515
516=== modified file 'dolfin_adjoint/utils.py'
517--- dolfin_adjoint/utils.py 2012-07-03 16:25:55 +0000
518+++ dolfin_adjoint/utils.py 2012-08-29 10:56:40 +0000
519@@ -321,7 +321,14 @@
520 return min(convergence_order(with_gradient))
521
522 def compute_gradient(J, param, forget=True):
523- dJdparam = None
524+ try:
525+ scalar = False
526+ dJdparam = [None for i in range(len(param))]
527+ lparam = param
528+ except TypeError:
529+ scalar = True
530+ dJdparam = [None]
531+ lparam = [param]
532 last_timestep = adjglobals.adjointer.timestep_count
533
534 for i in range(adjglobals.adjointer.timestep_count):
535@@ -334,14 +341,15 @@
536 adjglobals.adjointer.record_variable(adj_var, storage)
537 fwd_var = libadjoint.Variable(adj_var.name, adj_var.timestep, adj_var.iteration)
538
539- out = param.inner_adjoint(adjglobals.adjointer, output.data, i, fwd_var)
540- dJdparam = _add(dJdparam, out)
541+ for j in range(len(lparam)):
542+ out = lparam[j].inner_adjoint(adjglobals.adjointer, output.data, i, fwd_var)
543+ dJdparam[j] = _add(dJdparam[j], out)
544
545- if last_timestep > adj_var.timestep:
546- # We have hit a new timestep, and need to compute this timesteps' \partial J/\partial m contribution
547- last_timestep = adj_var.timestep
548- out = param.partial_derivative(adjglobals.adjointer, J, adj_var.timestep)
549- dJdparam = _add(dJdparam, out)
550+ if last_timestep > adj_var.timestep:
551+ # We have hit a new timestep, and need to compute this timesteps' \partial J/\partial m contribution
552+ last_timestep = adj_var.timestep
553+ out = lparam[j].partial_derivative(adjglobals.adjointer, J, adj_var.timestep)
554+ dJdparam[j] = _add(dJdparam[j], out)
555
556 if forget is None:
557 pass
558@@ -350,7 +358,10 @@
559 else:
560 adjglobals.adjointer.forget_adjoint_values(i)
561
562- return dJdparam
563+ if scalar:
564+ return dJdparam[0]
565+ else:
566+ return dJdparam
567
568 def test_scalar_parameter_adjoint(J, a, dJda, seed=None):
569 info_blue("Running Taylor remainder convergence analysis for the adjoint model ... ")
570
571=== modified file 'tests/burgers_oo/burgers_oo.py'
572--- tests/burgers_oo/burgers_oo.py 2012-07-02 18:37:16 +0000
573+++ tests/burgers_oo/burgers_oo.py 2012-08-29 10:56:40 +0000
574@@ -76,15 +76,15 @@
575 print "Running adjoint ... "
576
577 J = Functional(forward*forward*dx*dt[FINISH_TIME])
578- for (adjoint, var) in compute_adjoint(J, forget=False):
579- pass
580+ dJdm = compute_gradient(J, InitialConditionParameter(forward), forget = False)
581
582 def Jfunc(ic):
583 forward = main(ic, annotate=False)
584 return assemble(forward*forward*dx)
585
586- minconv = test_initial_condition_adjoint(Jfunc, ic, adjoint, seed=1.0e-5)
587+ minconv = test_initial_condition_adjoint(Jfunc, ic, dJdm, seed=1.0e-5)
588 if minconv < 1.9:
589+ info_red("Test failed. Convergence rate is %f < 1.9", minconv)
590 sys.exit(1)
591
592 dJ = assemble(derivative(forward_copy*forward_copy*dx, forward_copy))
593@@ -93,4 +93,5 @@
594 ic.vector()[:] = ic_copy.vector()
595 minconv = test_initial_condition_tlm(Jfunc, dJ, ic, seed=1.0e-5)
596 if minconv < 1.9:
597+ info_red("Test failed. Convergence rate is %d < 1.9", minconv)
598 sys.exit(1)
599
600=== modified file 'tests/optimal_control_mms/optimal_control_mms.py'
601--- tests/optimal_control_mms/optimal_control_mms.py 2012-07-14 09:16:08 +0000
602+++ tests/optimal_control_mms/optimal_control_mms.py 2012-08-29 10:56:40 +0000
603@@ -1,5 +1,6 @@
604 """ Solves a MMS problem with smooth control """
605
606+import sys
607 from dolfin import *
608 from dolfin_adjoint import *
609
610@@ -24,14 +25,15 @@
611 u_d = 1/(2*pi**2)*sin(pi*x[0])*sin(pi*x[1])
612
613 J = Functional((inner(u-u_d, u-u_d))*dx*dt[FINISH_TIME])
614- def Jfunc(m):
615- solve_pde(u, V, m)
616- return assemble(inner(u-u_d, u-u_d)*dx)
617+
618+ # Run the forward model once to create the annotation
619+ solve_pde(u, V, m)
620
621 # Run the optimisation
622- optimisation.minimise(Jfunc, J, InitialConditionParameter(m), m, algorithm = 'scipy.l_bfgs_b', pgtol=1e-16, factr=1, bounds = (-1, 1), iprint = 1, maxfun = 20)
623- #optimisation.minimise(Jfunc, J, InitialConditionParameter(m), m, algorithm = 'scipy.slsqp', bounds = (-1, 1), iprint = 3, iter = 60)
624- Jfunc(m)
625+ reduced_func = ReducedFunctional(J, InitialConditionParameter(m))
626+ minimize(reduced_func, algorithm = 'scipy.l_bfgs_b', pgtol=1e-16, factr=1, bounds = (-1, 1), iprint = 1, maxfun = 20)
627+ #minimize(reduced_func, algorithm = 'scipy.slsqp', bounds = (-1, 1), iprint = 3, iter = 60)
628+ solve_pde(u, V, m)
629
630 m_analytic = sin(pi*x[0])*sin(pi*x[1])
631 u_analytic = 1/(2*pi**2)*sin(pi*x[0])*sin(pi*x[1])
632@@ -49,6 +51,7 @@
633 control_errors.append(control_error)
634 state_errors.append(state_error)
635 element_sizes.append(1./n)
636+ adj_reset()
637
638 info_green("Control errors: " + str(control_errors))
639 info_green("Control convergence: " + str(convergence_order(control_errors, base = 2)))
640@@ -56,7 +59,9 @@
641 info_green("State convergence: " + str(convergence_order(state_errors, base = 2)))
642
643 if min(convergence_order(control_errors)) < 2.0:
644+ info_red("Convergence order below tolerance")
645 sys.exit(1)
646 if min(convergence_order(state_errors)) < 4.0:
647+ info_red("Convergence order below tolerance")
648 sys.exit(1)
649 info_green("Test passed")
650
651=== renamed directory 'tests/optimisation' => 'tests/optimization'
652=== renamed file 'tests/optimisation/optimisation.py' => 'tests/optimization/optimization.py'
653--- tests/optimisation/optimisation.py 2012-07-03 16:25:55 +0000
654+++ tests/optimization/optimization.py 2012-08-29 10:56:40 +0000
655@@ -4,9 +4,10 @@
656
657 from dolfin import *
658 from dolfin_adjoint import *
659+import libadjoint
660
661 dolfin.set_log_level(ERROR)
662-dolfin.parameters["optimisation"]["test_gradient"] = True
663+dolfin.parameters["optimization"]["test_gradient"] = True
664
665 n = 10
666 mesh = UnitInterval(n)
667@@ -30,12 +31,13 @@
668
669 t = 0.0
670 end = 0.2
671+ adjointer.time.start(t)
672 while (t <= end):
673 solve(F == 0, u_next, bc, annotate=annotate)
674 u.assign(u_next, annotate=annotate)
675
676 t += float(timestep)
677- adj_inc_timestep()
678+ adj_inc_timestep(time=t, finished = t>end)
679
680 if __name__ == "__main__":
681
682@@ -43,21 +45,29 @@
683 u = Function(ic, name='Velocity')
684
685 J = Functional(u*u*dx*dt[FINISH_TIME])
686- def Jfunc(ic):
687- u.assign(ic)
688- main(u, annotate=True)
689- return assemble(u*u*dx)
690+
691+ # Run the model once to create the annotation
692+ u.assign(ic)
693+ main(u, annotate=True)
694
695 # Run the optimisation
696 lb = project(Expression("-1"), V)
697- optimisation.minimise(Jfunc, J, InitialConditionParameter(u), ic, algorithm = 'scipy.l_bfgs_b', pgtol=1e-6, factr=1e5, bounds = (lb, 1), iprint = 1)
698+
699+ # Define the reduced funtional
700+ reduced_functional = ReducedFunctional(J, InitialConditionParameter(u))
701+
702+ # Run the optimisation problem with gradient tests and L-BFGS-B
703+ u_opt = minimize(reduced_functional, algorithm = 'scipy.l_bfgs_b', pgtol=1e-6, factr=1e5, bounds = (lb, 1), iprint = 1)
704 ic = project(Expression("sin(2*pi*x[0])"), V)
705
706- # For performance reasons, switch the gradient test off
707- dolfin.parameters["optimisation"]["test_gradient"] = False
708- optimisation.minimise(Jfunc, J, InitialConditionParameter(u), ic, algorithm = 'scipy.slsqp', bounds = (lb, 1), iprint = 2, acc = 1e-10)
709+ # Run the problem again with SQP, this time for performance reasons with the gradient test switched off
710+ dolfin.parameters["optimization"]["test_gradient"] = False
711+ u_opt = minimize(reduced_functional, algorithm = 'scipy.slsqp', bounds = (lb, 1), iprint = 2, acc = 1e-10)
712
713 tol = 1e-9
714- if Jfunc(ic) > tol:
715- print 'Test failed: Optimised functional value exceeds tolerance: ' , Jfunc(ic), ' > ', tol, '.'
716+ final_functional = reduced_functional(u_opt)
717+ print "Final functional value: ", final_functional
718+ if final_functional > tol:
719+ print 'Test failed: Optimised functional value exceeds tolerance: ' , final_functional, ' > ', tol, '.'
720 sys.exit(1)
721+
722
723=== renamed directory 'tests/optimisation_scalar' => 'tests/optimization_scalar'
724=== renamed file 'tests/optimisation_scalar/optimisation_scalar.py' => 'tests/optimization_scalar/optimization_scalar.py'
725--- tests/optimisation_scalar/optimisation_scalar.py 2012-07-02 18:37:16 +0000
726+++ tests/optimization_scalar/optimization_scalar.py 2012-08-29 10:56:40 +0000
727@@ -3,7 +3,7 @@
728 import sys
729
730 dolfin.set_log_level(ERROR)
731-dolfin.parameters["optimisation"]["test_gradient"] = True
732+dolfin.parameters["optimization"]["test_gradient"] = True
733
734 n = 10
735 mesh = UnitInterval(n)
736@@ -33,19 +33,16 @@
737
738 if __name__ == "__main__":
739 nu = Constant(0.0001)
740+ # Run the forward model once to have the annotation
741 main(nu)
742
743 J = Functional(inner(u, u)*dx*dt[FINISH_TIME])
744
745- def Jhat(nu):
746- u.assign(ic)
747- main(nu)
748- return assemble(inner(u, u)*dx)
749-
750 # Run the optimisation
751- optimisation.minimise(Jhat, J, ScalarParameter(nu), nu, 'scipy.slsqp', iprint = 2)
752+ reduced_functional = ReducedFunctional(J, ScalarParameter(nu))
753+ nu_opt = minimize(reduced_functional, 'scipy.slsqp', iprint = 2)
754
755 tol = 1e-4
756- if Jhat(nu) > tol:
757- print 'Test failed: Optimised functional value exceeds tolerance: ' , Jhat(nu), ' > ', tol, '.'
758+ if reduced_functional(nu_opt) > tol:
759+ print 'Test failed: Optimised functional value exceeds tolerance: ', reduced_functional(nu_opt), ' > ', tol, '.'
760 sys.exit(1)
761
762=== added directory 'tests/reduced_functional_evaluation'
763=== added file 'tests/reduced_functional_evaluation/reduced_functional_evaluation.py'
764--- tests/reduced_functional_evaluation/reduced_functional_evaluation.py 1970-01-01 00:00:00 +0000
765+++ tests/reduced_functional_evaluation/reduced_functional_evaluation.py 2012-08-29 10:56:40 +0000
766@@ -0,0 +1,54 @@
767+''' A simple test that compares the functional value computed manually and with libadjoints functional_evaluation.
768+ Writting this test was motivated by the bug described on https://bugs.launchpad.net/dolfin-adjoint/+bug/1032291 '''
769+from dolfin import *
770+from dolfin_adjoint import *
771+import sys
772+
773+# Global settings
774+set_log_level(ERROR)
775+
776+mesh = UnitSquare(10, 10)
777+V = FunctionSpace(mesh, "DG", 1)
778+
779+u_new = Function(V, name = "u_new")
780+u_old = Function(V, name = "u_old")
781+u_test = TestFunction(V)
782+
783+T = 2.
784+t = 0.
785+dlt = 1.
786+F1 = ( inner((u_new - u_old)/dlt, u_test)*dx - inner(Constant(1.), u_test)*dx )
787+#solve(inner(u_new, u_test)*dx == 0, u_new)
788+
789+adjointer.time.start(t)
790+man_func_value = 0.
791+print "+++++++++++++ INITIAL RUN +++++++++"
792+man_func_value_contr = 0.5*assemble(inner(u_new, u_new)*dx)
793+while t < T:
794+
795+ solve(F1 == 0, u_new)
796+ u_old.assign(u_new)
797+
798+ t += dlt
799+ man_func_value_contr = assemble(inner(u_new, u_new)*dx)
800+ if t>=T:
801+ man_func_value += 0.5*man_func_value_contr
802+ else:
803+ man_func_value += man_func_value_contr
804+ adj_inc_timestep(time=t, finished = t>=T)
805+
806+info_green("Manually computed functional value: %f", man_func_value)
807+adj_html("forward.html", "forward")
808+print
809+print "+++++++++++++ REPLAY +++++++++"
810+u_new.vector()[:] = 0.
811+u_old.vector()[:] = 0.
812+J = Functional(inner(u_new, u_new)*dx*dt)
813+reduced_functional = ReducedFunctional(J, InitialConditionParameter(u_old))
814+reduced_functional_value = reduced_functional(u_old)
815+info_green("Functional value from reduced functional: %f", reduced_functional_value)
816+
817+if abs(reduced_functional_value - man_func_value) > 1e-13:
818+ info_red("Test failed. Error: %f", abs(reduced_functional_value - man_func_value))
819+else:
820+ info_green("Test passed")
821
822=== modified file 'tests/test.py'
823--- tests/test.py 2012-07-14 17:27:55 +0000
824+++ tests/test.py 2012-08-29 10:56:40 +0000
825@@ -13,7 +13,7 @@
826 'navier_stokes': 'mpirun -n 2 python navier_stokes.py',
827 'svd_simple': 'mpirun -n 2 python svd_simple.py',
828 'gst_mass': 'mpirun -n 2 python gst_mass.py',
829- 'optimisation': 'mpirun -n 2 python optimisation.py',
830+ 'optimization': 'mpirun -n 2 python optimization.py',
831 'optimal_control_mms': 'mpirun -n 2 python optimal_control_mms.py',
832 'differentiability-dg-upwind': None,
833 'differentiability-stokes': None,
