Merge lp:~frankban/juju-deployer/unit-placement-fix into lp:juju-deployer
- unit-placement-fix
- Merge into trunk
Status: | Merged |
---|---|
Merged at revision: | 152 |
Proposed branch: | lp:~frankban/juju-deployer/unit-placement-fix |
Merge into: | lp:juju-deployer |
Diff against target: |
593 lines (+156/-107) 11 files modified
deployer/action/importer.py (+15/-24) deployer/config.py (+5/-5) deployer/deployment.py (+3/-3) deployer/service.py (+46/-45) deployer/tests/base.py (+36/-0) deployer/tests/test_data/v4/container-existing-machine.yaml (+1/-1) deployer/tests/test_data/v4/placement-invalid-number.yaml (+8/-0) deployer/tests/test_deployment.py (+23/-10) deployer/tests/test_guiserver.py (+7/-3) deployer/tests/test_importer.py (+11/-15) setup.py (+1/-1) |
To merge this branch: | bzr merge lp:~frankban/juju-deployer/unit-placement-fix |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Madison Scott-Clary (community) | code and qa | Approve | |
Tim Van Steenburgh (community) | Approve | ||
Brad Crittenden (community) | code | Approve | |
Review via email: mp+267187@code.launchpad.net |
Commit message
Description of the change
Fix deployments of bundles v4.
Fix some incorrect behavior of new v4 bundles, mostly
concerning validation and ordering of services.
Improve status retrieval while adding unit.
Fix checking for existing machines: it previously worked
properly only with top-level machines.
Fix a problem with machines mapping while
co-locating units to other services, units or
to declared machines.
Improve the logic used for detecting v4 vs v3
bundle syntax: the machines section in v4 can
be safely omitted according to the spec.
Fix sleep loops and memory leaks while running
tests exercising the importer.
Some lint/code style clean up.
Bump version up a little.
Tim Van Steenburgh (tvansteenburgh) wrote : | # |
Brad Crittenden (bac) wrote : | # |
Thanks for this fix Francesco. It LGTM.
Francesco Banconi (frankban) wrote : | # |
As mentioned on IRC, the sleep loops are solved by setting up the env mock with patch_env_status.
I also removed the reloading logic, as requested.
Thank you both for the reviews!
- 159. By Francesco Banconi
-
Remove the reloading logic in importer.add_unit.
Tim Van Steenburgh (tvansteenburgh) wrote : | # |
LGTM!
Madison Scott-Clary (makyo) wrote : | # |
Thanks Francesco, much appreciated, this LGTM. QA okay on local.
Preview Diff
1 | === modified file 'deployer/action/importer.py' |
2 | --- deployer/action/importer.py 2015-08-04 12:50:12 +0000 |
3 | +++ deployer/action/importer.py 2015-08-06 14:43:20 +0000 |
4 | @@ -14,13 +14,11 @@ |
5 | self.options = options |
6 | self.env = env |
7 | self.deployment = deployment |
8 | - self.machines = {} |
9 | |
10 | def add_units(self): |
11 | self.log.debug("Adding units...") |
12 | # Add units to existing services that don't match count. |
13 | env_status = self.env.status() |
14 | - reloaded = False |
15 | |
16 | # Workaround an issue where watch output doesn't include subordinate |
17 | # services right away, and add_unit would fail while attempting to add |
18 | @@ -56,18 +54,9 @@ |
19 | self.log.info( |
20 | "Adding %d more units to %s" % (abs(delta), svc.name)) |
21 | if svc.unit_placement: |
22 | - # Reload status once after non placed services units are done. |
23 | - if reloaded is False: |
24 | - # Improved crappy workaround juju-core api inconsistency |
25 | - delay = time.time() + 60 |
26 | - while delay > time.time(): |
27 | - time.sleep(5.1) |
28 | - env_status = self.env.status() |
29 | - if svc.name in env_status['services']: |
30 | - break |
31 | - |
32 | - reloaded = True |
33 | - |
34 | + # Reload status before each placed unit is deployed, so that |
35 | + # co-location to other units can take place properly. |
36 | + env_status = self.env.status() |
37 | placement = self.deployment.get_unit_placement(svc, env_status) |
38 | for mid in range(cur_units, svc.num_units): |
39 | self.env.add_unit(svc.name, placement.get(mid)) |
40 | @@ -76,8 +65,7 @@ |
41 | |
42 | def machine_exists(self, id): |
43 | """Checks if the given id exists on the current environment.""" |
44 | - return int(id) in map(int, |
45 | - self.env.status().get('machines', {}).keys()) |
46 | + return str(id) in map(str, self.env.status().get('machines', {})) |
47 | |
48 | def create_machines(self): |
49 | """Create machines as specified in the machine spec in the bundle. |
50 | @@ -104,19 +92,23 @@ |
51 | creation is skipped. |
52 | """ |
53 | machines = self.deployment.get_machines() |
54 | + machines_map = {} |
55 | |
56 | if machines: |
57 | self.log.info("Creating machines...") |
58 | for machine_name, spec in machines.items(): |
59 | if self.machine_exists(machine_name): |
60 | + # XXX frankban: do we really want this? The machine |
61 | + # identifiers as included in v4 bundles are not intended |
62 | + # to refer to real machines. A mapping could be provided |
63 | + # but this kind of implicit mapping seems weak. |
64 | self.log.info( |
65 | """Machine %s already exists on environment, """ |
66 | """skipping creation""" % machine_name) |
67 | - self.machines[machine_name] = self.deployment.get_machine( |
68 | - machine_name) |
69 | + machines_map[machine_name] = str(machine_name) |
70 | else: |
71 | self.log.info("Machine %s will be created" % machine_name) |
72 | - self.machines[machine_name] = self.env.add_machine( |
73 | + machines_map[machine_name] = self.env.add_machine( |
74 | series=spec.get('series', |
75 | self.deployment.data['series']), |
76 | constraints=spec.get('constraints')) |
77 | @@ -125,7 +117,7 @@ |
78 | annotations = spec.get('annotations') |
79 | if annotations: |
80 | self.env.set_annotation( |
81 | - self.machines[machine_name], |
82 | + machines_map[machine_name], |
83 | annotations, |
84 | entity_type='machine') |
85 | |
86 | @@ -139,13 +131,12 @@ |
87 | if self.machine_exists(machine_name): |
88 | self.log.info("Machine %s already exists," |
89 | "skipping creation" % machine_name) |
90 | - self.machines[container_host] = self.deployment.get_machine( |
91 | - container_host) |
92 | + machines_map[container_host] = str(container_host) |
93 | else: |
94 | self.log.info("A new container will be created" |
95 | "on machine: %s" % container_host) |
96 | - self.machines[container_host] = self.env.add_machine() |
97 | - self.deployment.set_machines(machines) |
98 | + machines_map[container_host] = self.env.add_machine() |
99 | + self.deployment.set_machines(machines_map) |
100 | |
101 | def get_charms(self): |
102 | # Get Charms |
103 | |
104 | === modified file 'deployer/config.py' |
105 | --- deployer/config.py 2015-03-16 20:00:02 +0000 |
106 | +++ deployer/config.py 2015-08-06 14:43:20 +0000 |
107 | @@ -51,9 +51,9 @@ |
108 | raise |
109 | |
110 | # Check if this is a v4 bundle. |
111 | - if 'services' in yaml_result: |
112 | - if 'machines' in yaml_result: |
113 | - self.version = 4 |
114 | + services = yaml_result.get('services') |
115 | + if isinstance(services, dict) and 'services' not in services: |
116 | + self.version = 4 |
117 | yaml_result = {config_file: yaml_result} |
118 | |
119 | self.yaml[config_file] = yaml_result |
120 | @@ -64,7 +64,7 @@ |
121 | return sorted(self.data) |
122 | |
123 | def get(self, key): |
124 | - if not key in self.data: |
125 | + if key not in self.data: |
126 | self.log.warning("Deployment %r not found. Available %s", |
127 | key, ", ".join(self.keys())) |
128 | raise ErrorExit() |
129 | @@ -99,7 +99,7 @@ |
130 | return parents |
131 | |
132 | def _resolve_inherited(self, deploy_data): |
133 | - if not 'inherits' in deploy_data: |
134 | + if 'inherits' not in deploy_data: |
135 | return deploy_data |
136 | inherits = parents = self._inherits(deploy_data) |
137 | for parent_name in parents: |
138 | |
139 | === modified file 'deployer/deployment.py' |
140 | --- deployer/deployment.py 2015-05-13 14:16:46 +0000 |
141 | +++ deployer/deployment.py 2015-08-06 14:43:20 +0000 |
142 | @@ -38,7 +38,7 @@ |
143 | pprint.pprint(self.data) |
144 | |
145 | def get_service(self, name): |
146 | - if not name in self.data['services']: |
147 | + if name not in self.data['services']: |
148 | return |
149 | return Service(name, self.data['services'][name]) |
150 | |
151 | @@ -95,9 +95,9 @@ |
152 | # Check for colocation. This naively assumes that there is no |
153 | # circularity in placements. |
154 | if x_in_y(svc_b, svc_a): |
155 | + return 1 |
156 | + if x_in_y(svc_a, svc_b): |
157 | return -1 |
158 | - if x_in_y(svc_a, svc_b): |
159 | - return 1 |
160 | # If no colocation exists, simply compare names. |
161 | return cmp(svc_a.name, svc_b.name) |
162 | return 1 |
163 | |
164 | === modified file 'deployer/service.py' |
165 | --- deployer/service.py 2015-03-18 18:19:43 +0000 |
166 | +++ deployer/service.py 2015-08-06 14:43:20 +0000 |
167 | @@ -220,7 +220,7 @@ |
168 | |
169 | def _fill_placement(self): |
170 | """Fill the placement spec with necessary data. |
171 | - |
172 | + |
173 | From the spec: |
174 | A unit placement may be specified with a service name only, in which |
175 | case its unit number is assumed to be one more than the unit number of |
176 | @@ -238,7 +238,7 @@ |
177 | return |
178 | |
179 | self.service.svc_data['to'] = ( |
180 | - unit_mapping + |
181 | + unit_mapping + |
182 | list(itertools.repeat(unit_mapping[-1], unit_count - len(unit_mapping))) |
183 | ) |
184 | unit_mapping = self.service.unit_placement |
185 | @@ -256,9 +256,9 @@ |
186 | |
187 | def _parse_placement(self, placement): |
188 | """Parse a unit placement statement. |
189 | - |
190 | + |
191 | In version 4 bundles, unit placement statements take the form of |
192 | - |
193 | + |
194 | (<containertype>:)?(<unit>|<machine>|new) |
195 | |
196 | This splits the placement into a container, a placement, and a unit |
197 | @@ -278,7 +278,7 @@ |
198 | containers, and/or services must be internally consistent, consistent |
199 | with other services in the deployment, and consistent with any machines |
200 | specified in the 'machines' block of the deployment. |
201 | - |
202 | + |
203 | A feedback object is returned, potentially with errors and warnings |
204 | inside it. |
205 | """ |
206 | @@ -294,55 +294,51 @@ |
207 | |
208 | services = dict([(s.name, s) for s in self.deployment.get_services()]) |
209 | machines = self.deployment.get_machines() |
210 | - container = None |
211 | - unit_number = None |
212 | + service_name = self.service.name |
213 | |
214 | for i, placement in enumerate(unit_placement): |
215 | - container, placement, unit_number = self._parse_placement(placement) |
216 | + container, target, unit_number = self._parse_placement(placement) |
217 | |
218 | + # Validate the container type. |
219 | if container and container not in ('lxc', 'kvm'): |
220 | feedback.error( |
221 | - "Invalid container type: %s service: %s placement: %s" \ |
222 | - % (container, self.service.name, unit_placement[i])) |
223 | - # XXX Nesting containers not supported yet. |
224 | - # Makyo - 2015-03-01 |
225 | - if container is not None and not (placement.isdigit() |
226 | - or placement == 'new'): |
227 | - feedback.error( |
228 | - "Invalid target for container: %s" % ( |
229 | - unit_placement[i])) |
230 | + 'Invalid container type: {} service: {} placement: {}' |
231 | + ''.format(container, service_name, placement)) |
232 | # Specify an existing machine (or, if the number is in the |
233 | # list of machine specs, one of those). |
234 | - if placement.isdigit(): |
235 | - if placement in machines: |
236 | - continue |
237 | - else: |
238 | - feedback.error( |
239 | - ("Service placement to machine " |
240 | - "not supported %s to %s") % ( |
241 | - self.service.name, unit_placement[i])) |
242 | - # Specify a machine from the machine spec. |
243 | - elif placement in self.deployment.get_machines(): |
244 | + if str(target) in machines: |
245 | continue |
246 | - # Specify a service for colocation. |
247 | - elif placement in services: |
248 | - # Specify a particular unit for colocation. |
249 | - if unit_number is not None and \ |
250 | - unit_number > services[placement].num_units: |
251 | - feedback.error( |
252 | - "Service unit does not exist, %s to %s/%s" % ( |
253 | - self.service.name, placement, unit_number)) |
254 | - elif self.deployment.get_charm_for(placement).is_subordinate(): |
255 | - feedback.error( |
256 | - "Cannot place to a subordinate service: %s -> %s" % ( |
257 | - self.service.name, placement)) |
258 | + if target.isdigit(): |
259 | + feedback.error( |
260 | + 'Service placement to machine not supported: {} to {}' |
261 | + ''.format(service_name, placement)) |
262 | + # Specify a service for co-location. |
263 | + elif target in services: |
264 | + # Specify a particular unit for co-location. |
265 | + if unit_number is not None: |
266 | + try: |
267 | + unit_number = int(unit_number) |
268 | + except (TypeError, ValueError): |
269 | + feedback.error( |
270 | + 'Invalid unit number for placement: {} to {}' |
271 | + ''.format(service_name, unit_number)) |
272 | + continue |
273 | + if unit_number > services[target].num_units: |
274 | + feedback.error( |
275 | + 'Service unit does not exist: {} to {}/{}' |
276 | + ''.format(service_name, target, unit_number)) |
277 | + continue |
278 | + if self.deployment.get_charm_for(target).is_subordinate(): |
279 | + feedback.error( |
280 | + 'Cannot place to a subordinate service: {} -> {}' |
281 | + ''.format(service_name, target)) |
282 | # Create a new machine or container. |
283 | - elif placement == 'new': |
284 | + elif target == 'new': |
285 | continue |
286 | else: |
287 | feedback.error( |
288 | - "Invalid service placement %s to %s" % ( |
289 | - self.service.name, unit_placement[i])) |
290 | + 'Invalid service placement: {} to {}' |
291 | + ''.format(service_name, placement)) |
292 | return feedback |
293 | |
294 | def get_new_machines_for_containers(self): |
295 | @@ -367,6 +363,8 @@ |
296 | svc = self.service |
297 | |
298 | unit_mapping = svc.unit_placement |
299 | + if not unit_mapping: |
300 | + return None |
301 | unit_placement = placement = str(unit_mapping[unit_number]) |
302 | container = None |
303 | u_idx = unit_number |
304 | @@ -375,14 +373,17 @@ |
305 | if placement == 'new': |
306 | return None |
307 | |
308 | - container, placement, unit_number = self._parse_placement(unit_placement) |
309 | + container, placement, unit_number = self._parse_placement( |
310 | + unit_placement) |
311 | |
312 | if placement in self.machines_map: |
313 | - return self._format_placement(self.machines_map[placement], container) |
314 | + return self._format_placement( |
315 | + self.machines_map[placement], container) |
316 | |
317 | # Handle <container_type>:new |
318 | if placement == 'new': |
319 | return self._format_placement( |
320 | - self.machines_map['%s/%d' % (self.service.name, u_idx)], container) |
321 | + self.machines_map['%s/%d' % (self.service.name, u_idx)], |
322 | + container) |
323 | |
324 | return self.colocate(status, placement, u_idx, container, svc) |
325 | |
326 | === modified file 'deployer/tests/base.py' |
327 | --- deployer/tests/base.py 2015-03-18 18:19:43 +0000 |
328 | +++ deployer/tests/base.py 2015-08-06 14:43:20 +0000 |
329 | @@ -6,6 +6,8 @@ |
330 | import StringIO |
331 | import tempfile |
332 | |
333 | +import mock |
334 | + |
335 | import deployer |
336 | from deployer.config import ConfigStack |
337 | |
338 | @@ -77,3 +79,37 @@ |
339 | os.environ.update(original_environ) |
340 | |
341 | os.environ.update(kw) |
342 | + |
343 | + |
344 | +def patch_env_status(env, services, machines=()): |
345 | + """Simulate that the given mock env has the status described in services. |
346 | + |
347 | + This function is used so that tests do not have to wait minutes for |
348 | + service units presence when the importer is used with the given env. |
349 | + |
350 | + The services argument is a dict mapping service names with the number of |
351 | + their units. This will be reflected by the status returned when the |
352 | + importer adds the units (see "deployer.action.importer.Importer.add_unit"). |
353 | + |
354 | + The machines argument can be used to simulate that the given machines are |
355 | + present in the Juju environment. |
356 | + """ |
357 | + services_status = dict( |
358 | + (k, {'units': dict((i, {}) for i in range(v))}) |
359 | + for k, v in services.items() |
360 | + ) |
361 | + machines_status = dict((i, {}) for i in machines) |
362 | + env.status.side_effect = [ |
363 | + # There are no services initially. |
364 | + {'services': {}, 'machines': machines_status}, |
365 | + {'services': {}, 'machines': machines_status}, |
366 | + # This is the workaround check for subordinate charms presence: |
367 | + # see lp:1421315 for details. |
368 | + {'services': services_status, 'machines': machines_status}, |
369 | + {'services': services_status, 'machines': machines_status}, |
370 | + # After we exited the workaround loop, we can just mock further |
371 | + # status results. |
372 | + mock.MagicMock(), |
373 | + mock.MagicMock(), |
374 | + mock.MagicMock(), |
375 | + ] |
376 | |
377 | === modified file 'deployer/tests/test_data/v4/container-existing-machine.yaml' |
378 | --- deployer/tests/test_data/v4/container-existing-machine.yaml 2015-05-13 14:06:34 +0000 |
379 | +++ deployer/tests/test_data/v4/container-existing-machine.yaml 2015-08-06 14:43:20 +0000 |
380 | @@ -39,4 +39,4 @@ |
381 | - - mediawiki:db |
382 | - mysql:db |
383 | machines: |
384 | - 1: 1 |
385 | + 1: {} |
386 | |
387 | === added file 'deployer/tests/test_data/v4/placement-invalid-number.yaml' |
388 | --- deployer/tests/test_data/v4/placement-invalid-number.yaml 1970-01-01 00:00:00 +0000 |
389 | +++ deployer/tests/test_data/v4/placement-invalid-number.yaml 2015-08-06 14:43:20 +0000 |
390 | @@ -0,0 +1,8 @@ |
391 | +series: trusty |
392 | +services: |
393 | + django: |
394 | + charm: cs:trusty/django |
395 | + to: |
396 | + - lxc:haproxy/bad-wolf |
397 | + haproxy: |
398 | + charm: cs:trusty/haproxy |
399 | |
400 | === modified file 'deployer/tests/test_deployment.py' |
401 | --- deployer/tests/test_deployment.py 2015-03-18 18:19:43 +0000 |
402 | +++ deployer/tests/test_deployment.py 2015-08-06 14:43:20 +0000 |
403 | @@ -106,12 +106,17 @@ |
404 | d._machines_placement_sort( |
405 | FauxService(name="x", unit_placement=['asdf']), |
406 | FauxService(name="y", unit_placement=['lxc:x/1']) |
407 | - ), 1) |
408 | + ), -1) |
409 | self.assertEqual( |
410 | d._machines_placement_sort( |
411 | FauxService(name="y", unit_placement=['lxc:x/1']), |
412 | FauxService(name="x", unit_placement=['asdf']) |
413 | - ), -1) |
414 | + ), 1) |
415 | + self.assertEqual( |
416 | + d._machines_placement_sort( |
417 | + FauxService(name="x", unit_placement=['y']), |
418 | + FauxService(name="y") |
419 | + ), 1) |
420 | self.assertEqual( |
421 | d._machines_placement_sort( |
422 | FauxService(name="x", unit_placement=['asdf']), |
423 | @@ -195,12 +200,12 @@ |
424 | self.assertIn( |
425 | 'Cannot place to a subordinate service: nova-compute -> nrpe\n', |
426 | output) |
427 | - |
428 | + |
429 | @skip_if_offline |
430 | def test_validate_invalid_placement_subordinate_v4(self): |
431 | # Placement validation fails if a subordinate charm is provided. |
432 | deployment = self.get_deployment_and_fetch_v4( |
433 | - 'placement-invalid-subordinate.yaml') |
434 | + 'placement-invalid-subordinate.yaml') |
435 | with self.assertRaises(ErrorExit): |
436 | deployment.validate_placement() |
437 | output = self.output.getvalue() |
438 | @@ -208,6 +213,15 @@ |
439 | 'Cannot place to a subordinate service: nova-compute -> nrpe\n', |
440 | output) |
441 | |
442 | + def test_validate_invalid_unit_number(self): |
443 | + # Placement validation fails if an invalid unit number is provided. |
444 | + deployment = self.get_deployment_v4('placement-invalid-number.yaml') |
445 | + with self.assertRaises(ErrorExit): |
446 | + deployment.validate_placement() |
447 | + output = self.output.getvalue() |
448 | + self.assertIn( |
449 | + 'Invalid unit number for placement: django to bad-wolf\n', output) |
450 | + |
451 | def test_get_unit_placement_v3(self): |
452 | d = self.get_named_deployment_v3("stack-placement.yaml", "stack") |
453 | status = { |
454 | @@ -281,12 +295,11 @@ |
455 | feedback = placement.validate() |
456 | self.assertEqual(feedback.get_errors(), [ |
457 | 'Invalid container type: asdf service: mysql placement: asdf:0', |
458 | - 'Service placement to machine not supported mysql to asdf:0', |
459 | - 'Invalid target for container: lxc:asdf', |
460 | - 'Invalid service placement mysql to lxc:asdf', |
461 | - 'Service placement to machine not supported mysql to 1', |
462 | - 'Service unit does not exist, mysql to wordpress/3', |
463 | - 'Invalid service placement mysql to asdf']) |
464 | + 'Service placement to machine not supported: mysql to asdf:0', |
465 | + 'Invalid service placement: mysql to lxc:asdf', |
466 | + 'Service placement to machine not supported: mysql to 1', |
467 | + 'Service unit does not exist: mysql to wordpress/3', |
468 | + 'Invalid service placement: mysql to asdf']) |
469 | |
470 | def test_get_unit_placement_v4_simple(self): |
471 | d = self.get_deployment_v4('simple.yaml') |
472 | |
473 | === modified file 'deployer/tests/test_guiserver.py' |
474 | --- deployer/tests/test_guiserver.py 2015-03-17 10:40:34 +0000 |
475 | +++ deployer/tests/test_guiserver.py 2015-08-06 14:43:20 +0000 |
476 | @@ -11,7 +11,10 @@ |
477 | |
478 | from deployer import guiserver |
479 | from deployer.feedback import Feedback |
480 | -from deployer.tests.base import skip_if_offline |
481 | +from deployer.tests.base import ( |
482 | + patch_env_status, |
483 | + skip_if_offline, |
484 | +) |
485 | |
486 | |
487 | class TestGetDefaultGuiserverOptions(unittest.TestCase): |
488 | @@ -268,6 +271,8 @@ |
489 | def test_importer_behavior(self, mock_sleep, mock_environment): |
490 | # The importer executes the expected environment calls. |
491 | self.addCleanup(self.cleanup_series_path) |
492 | + patch_env_status(mock_environment(), {'mysql': 1, 'wordpress': 1}) |
493 | + mock_environment.reset_mock() |
494 | with self.patch_juju_home(): |
495 | self.import_bundle() |
496 | mock_sleep.assert_has_calls([mock.call(5.1), mock.call(60)]) |
497 | @@ -289,8 +294,7 @@ |
498 | None, 1, None), |
499 | mock.call.set_annotation( |
500 | 'wordpress', {'gui-y': '414.547', 'gui-x': '425.347'}), |
501 | - mock.call.add_units('mysql', 2), |
502 | - mock.call.add_units('wordpress', 1), |
503 | + mock.call.add_units('mysql', 1), |
504 | mock.call.add_relation('mysql:db', 'wordpress:db'), |
505 | mock.call.close(), |
506 | ], any_order=True) |
507 | |
508 | === modified file 'deployer/tests/test_importer.py' |
509 | --- deployer/tests/test_importer.py 2015-05-13 14:06:34 +0000 |
510 | +++ deployer/tests/test_importer.py 2015-08-06 14:43:20 +0000 |
511 | @@ -6,7 +6,11 @@ |
512 | from deployer.action.importer import Importer |
513 | from deployer.utils import yaml_dump, yaml_load |
514 | |
515 | -from base import Base, skip_if_offline |
516 | +from base import ( |
517 | + Base, |
518 | + patch_env_status, |
519 | + skip_if_offline, |
520 | +) |
521 | |
522 | |
523 | class Options(dict): |
524 | @@ -51,6 +55,7 @@ |
525 | stack = ConfigStack(self.options.configs) |
526 | deploy = stack.get('wiki') |
527 | env = mock.MagicMock() |
528 | + patch_env_status(env, {'wiki': 1, 'db': 1}) |
529 | importer = Importer(env, deploy, self.options) |
530 | importer.run() |
531 | |
532 | @@ -60,10 +65,6 @@ |
533 | 'wiki', 'cs:precise/mediawiki', '', config, None, 1, None)) |
534 | env.add_relation.assert_called_once_with('wiki', 'db') |
535 | |
536 | - self.assertEqual( |
537 | - yaml_load(yaml_dump(yaml_load(yaml_dump(config)))), |
538 | - config) |
539 | - |
540 | @skip_if_offline |
541 | @mock.patch('deployer.action.importer.time') |
542 | def test_importer_no_relations(self, mock_time): |
543 | @@ -71,10 +72,10 @@ |
544 | stack = ConfigStack(self.options.configs) |
545 | deploy = stack.get('wiki') |
546 | env = mock.MagicMock() |
547 | + patch_env_status(env, {'wiki': 1, 'db': 1}) |
548 | importer = Importer(env, deploy, self.options) |
549 | importer.run() |
550 | - |
551 | - self.assertFalse(env.add_relation.called, False) |
552 | + self.assertFalse(env.add_relation.called) |
553 | |
554 | @skip_if_offline |
555 | @mock.patch('deployer.action.importer.time') |
556 | @@ -84,6 +85,7 @@ |
557 | stack = ConfigStack(self.options.configs) |
558 | deploy = stack.get(self.options.configs[0]) |
559 | env = mock.MagicMock() |
560 | + patch_env_status(env, {'mediawiki': 1, 'mysql': 1}) |
561 | importer = Importer(env, deploy, self.options) |
562 | importer.run() |
563 | |
564 | @@ -104,14 +106,8 @@ |
565 | stack = ConfigStack(self.options.configs) |
566 | deploy = stack.get(self.options.configs[0]) |
567 | env = mock.MagicMock() |
568 | - |
569 | - config = {'status.return_value.get.return_value': { |
570 | - 1: {} |
571 | - }} |
572 | - |
573 | - env.configure_mock(**config) |
574 | + patch_env_status(env, {'mediawiki': 1, 'mysql': 1}, machines=[1]) |
575 | |
576 | importer = Importer(env, deploy, self.options) |
577 | importer.run() |
578 | - |
579 | - env.add_machine.assert_not_called() |
580 | + self.assertFalse(env.add_machine.called) |
581 | |
582 | === modified file 'setup.py' |
583 | --- setup.py 2015-08-04 17:57:46 +0000 |
584 | +++ setup.py 2015-08-06 14:43:20 +0000 |
585 | @@ -3,7 +3,7 @@ |
586 | |
587 | setup( |
588 | name="juju-deployer", |
589 | - version="0.5.0", |
590 | + version="0.5.1", |
591 | description="A tool for deploying complex stacks with juju.", |
592 | long_description=open("README").read(), |
593 | author="Kapil Thangavelu", |
One inline comment, plus I wonder if you could point out which change fixes the test memory leak problem? Otherwise looks good.