Merge lp:~hazmat/juju-deployer/unit-placement into lp:juju-deployer
- unit-placement
- Merge into trunk
Status: | Merged |
---|---|
Approved by: | Adam Gandelman |
Approved revision: | 91 |
Merged at revision: | 80 |
Proposed branch: | lp:~hazmat/juju-deployer/unit-placement |
Merge into: | lp:juju-deployer |
Diff against target: |
731 lines (+436/-42) 14 files modified
configs/export.yml (+17/-0) deployer/action/diff.py (+1/-1) deployer/action/importer.py (+55/-23) deployer/deployment.py (+111/-1) deployer/env/base.py (+3/-0) deployer/env/go.py (+68/-6) deployer/service.py (+11/-3) deployer/tests/test_config.py (+5/-6) deployer/tests/test_data/stack-placement-invalid-2.yaml (+13/-0) deployer/tests/test_data/stack-placement-invalid.yaml (+13/-0) deployer/tests/test_data/stack-placement.yaml (+18/-0) deployer/tests/test_deployment.py (+66/-1) doc/config.rst (+54/-0) setup.py (+1/-1) |
To merge this branch: | bzr merge lp:~hazmat/juju-deployer/unit-placement |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Adam Gandelman (community) | Needs Fixing | ||
Review via email:
|
Commit message
Description of the change
Support for unit placement using deployer configuration. Also improve terminate/reset support with containers, and a fix for --diff output.
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Kapil Thangavelu (hazmat) wrote : | # |
The second one is due to a need for 0.13 for jujuclient. I'll update the
dep spec for that and add some tests for the first case.
On Thu, Oct 31, 2013 at 1:41 PM, Adam Gandelman <email address hidden> wrote:
> Review: Needs Fixing
>
> Testing this now, couple of issues so far:
>
> If a service is configured as:
>
> fooservice:
> to: [0]
>
> deployer blows up:
>
> 70, in get_unit_placement
> if ':' in unit_placement:
> TypeError: argument of type 'int' is not iterable
>
> Only running that code if unit_placement is a string seems to fix that.
>
> Adding units fails:
>
> 2013-10-31 17:34:26 [INFO] deployer.import: Adding 2 more units to
> nova-compute
> Traceback (most recent call last):
> File "/usr/local/
> load_entry_
> 'juju-deployer')()
> File
> "/usr/local/
> line 118, in main
> run()
> File
> "/usr/local/
> line 209, in run
> importer.
> File
> "/usr/local/
> line 186, in run
> self.add_units()
> File
> "/usr/local/
> line 54, in add_units
> self.env.
> File
> "/usr/local/
> line 28, in add_unit
> return self.client.
> File
> "/usr/local/
> line 465, in add_unit
> "Params": params})
> File
> "/usr/local/
> line 135, in _rpc
> raise EnvError(result)
> jujuclient.EnvError:
> { u'Error': u'must add at least one unit', u'RequestId': 1,
> u'Response': { }}
>
> --
>
> https:/
> You are the owner of lp:~hazmat/juju-deployer/unit-placement.
>
- 89. By Kapil Thangavelu
-
fix for machine 0 placement
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Kapil Thangavelu (hazmat) wrote : | # |
Fixes pushed
- 90. By Kapil Thangavelu
-
require newer version of jujuclient
- 91. By Kapil Thangavelu
-
by request do a wait for units before adding relations.
Preview Diff
1 | === added file 'configs/export.yml' | |||
2 | --- configs/export.yml 1970-01-01 00:00:00 +0000 | |||
3 | +++ configs/export.yml 2013-11-01 15:46:48 +0000 | |||
4 | @@ -0,0 +1,17 @@ | |||
5 | 1 | envExport: | ||
6 | 2 | services: | ||
7 | 3 | mysql: | ||
8 | 4 | charm: "cs:precise/mysql-27" | ||
9 | 5 | annotations: | ||
10 | 6 | "gui-x": "-251" | ||
11 | 7 | "gui-y": "-203" | ||
12 | 8 | wordpress: | ||
13 | 9 | charm: "cs:precise/wordpress-20" | ||
14 | 10 | num_units: 3 | ||
15 | 11 | to: ['lxc:mysql', 'lxc:mysql'] | ||
16 | 12 | annotations: | ||
17 | 13 | "gui-x": "116" | ||
18 | 14 | "gui-y": "-206" | ||
19 | 15 | relations: | ||
20 | 16 | - - "wordpress:db" | ||
21 | 17 | - "mysql:db" | ||
22 | 0 | 18 | ||
23 | === modified file 'deployer/action/diff.py' | |||
24 | --- deployer/action/diff.py 2013-07-22 15:29:31 +0000 | |||
25 | +++ deployer/action/diff.py 2013-11-01 15:46:48 +0000 | |||
26 | @@ -134,7 +134,7 @@ | |||
27 | 134 | if e_v != v: | 134 | if e_v != v: |
28 | 135 | mod['config'] = {k: e_v} | 135 | mod['config'] = {k: e_v} |
29 | 136 | if e_s['unit_count'] != d_s.get('num_units', 1): | 136 | if e_s['unit_count'] != d_s.get('num_units', 1): |
31 | 137 | mod['num_units'] = e_s['num_units'] | 137 | mod['num_units'] = e_s['unit_count'] - d_s['num_units'] |
32 | 138 | return mod | 138 | return mod |
33 | 139 | 139 | ||
34 | 140 | def run(self): | 140 | def run(self): |
35 | 141 | 141 | ||
36 | === modified file 'deployer/action/importer.py' | |||
37 | --- deployer/action/importer.py 2013-10-02 17:50:05 +0000 | |||
38 | +++ deployer/action/importer.py 2013-11-01 15:46:48 +0000 | |||
39 | @@ -19,25 +19,41 @@ | |||
40 | 19 | self.log.debug("Adding units...") | 19 | self.log.debug("Adding units...") |
41 | 20 | # Add units to existing services that don't match count. | 20 | # Add units to existing services that don't match count. |
42 | 21 | env_status = self.env.status() | 21 | env_status = self.env.status() |
44 | 22 | added = set() | 22 | reloaded = False |
45 | 23 | |||
46 | 23 | for svc in self.deployment.get_services(): | 24 | for svc in self.deployment.get_services(): |
61 | 24 | delta = (svc.num_units - | 25 | cur_units = len(env_status['services'][svc.name].get('units', ())) |
62 | 25 | len(env_status['services'][svc.name].get('units', ()))) | 26 | delta = (svc.num_units - cur_units) |
63 | 26 | if delta > 0: | 27 | |
64 | 27 | charm = self.deployment.get_charm_for(svc.name) | 28 | if delta <= 0: |
51 | 28 | if charm.is_subordinate(): | ||
52 | 29 | self.log.warning( | ||
53 | 30 | "Config specifies num units for subordinate: %s", | ||
54 | 31 | svc.name) | ||
55 | 32 | continue | ||
56 | 33 | self.log.info( | ||
57 | 34 | "Adding %d more units to %s" % (abs(delta), svc.name)) | ||
58 | 35 | for u in self.env.add_units(svc.name, abs(delta)): | ||
59 | 36 | added.add(u) | ||
60 | 37 | else: | ||
65 | 38 | self.log.debug( | 29 | self.log.debug( |
66 | 39 | " Service %r does not need any more units added.", | 30 | " Service %r does not need any more units added.", |
67 | 40 | svc.name) | 31 | svc.name) |
68 | 32 | continue | ||
69 | 33 | |||
70 | 34 | charm = self.deployment.get_charm_for(svc.name) | ||
71 | 35 | if charm.is_subordinate(): | ||
72 | 36 | self.log.warning( | ||
73 | 37 | "Config specifies num units for subordinate: %s", | ||
74 | 38 | svc.name) | ||
75 | 39 | continue | ||
76 | 40 | |||
77 | 41 | self.log.info( | ||
78 | 42 | "Adding %d more units to %s" % (abs(delta), svc.name)) | ||
79 | 43 | if svc.unit_placement: | ||
80 | 44 | # Reload status once after non placed services units are done. | ||
81 | 45 | if reloaded is False: | ||
82 | 46 | # Crappy workaround juju-core api inconsistency | ||
83 | 47 | time.sleep(5.1) | ||
84 | 48 | env_status = self.env.status() | ||
85 | 49 | reloaded = True | ||
86 | 50 | |||
87 | 51 | for mid in range(cur_units, svc.num_units): | ||
88 | 52 | mspec = self.deployment.get_unit_placement( | ||
89 | 53 | svc, mid, env_status) | ||
90 | 54 | self.env.add_unit(svc.name, mspec) | ||
91 | 55 | else: | ||
92 | 56 | self.env.add_units(svc.name, abs(delta)) | ||
93 | 41 | 57 | ||
94 | 42 | def get_charms(self): | 58 | def get_charms(self): |
95 | 43 | # Get Charms | 59 | # Get Charms |
96 | @@ -53,6 +69,8 @@ | |||
97 | 53 | def deploy_services(self): | 69 | def deploy_services(self): |
98 | 54 | self.log.info("Deploying services...") | 70 | self.log.info("Deploying services...") |
99 | 55 | env_status = self.env.status() | 71 | env_status = self.env.status() |
100 | 72 | reloaded = False | ||
101 | 73 | |||
102 | 56 | for svc in self.deployment.get_services(): | 74 | for svc in self.deployment.get_services(): |
103 | 57 | if svc.name in env_status['services']: | 75 | if svc.name in env_status['services']: |
104 | 58 | self.log.debug( | 76 | self.log.debug( |
105 | @@ -62,14 +80,28 @@ | |||
106 | 62 | charm = self.deployment.get_charm_for(svc.name) | 80 | charm = self.deployment.get_charm_for(svc.name) |
107 | 63 | self.log.info( | 81 | self.log.info( |
108 | 64 | " Deploying service %s using %s", svc.name, charm.charm_url) | 82 | " Deploying service %s using %s", svc.name, charm.charm_url) |
109 | 83 | |||
110 | 84 | if svc.unit_placement: | ||
111 | 85 | # We sorted all the non placed services first, so we only | ||
112 | 86 | # need to update status once after we're done with them. | ||
113 | 87 | if not reloaded: | ||
114 | 88 | self.log.debug( | ||
115 | 89 | " Refetching status for placement deploys") | ||
116 | 90 | time.sleep(5.1) | ||
117 | 91 | env_status = self.env.status() | ||
118 | 92 | reloaded = True | ||
119 | 93 | num_units = 1 | ||
120 | 94 | else: | ||
121 | 95 | num_units = svc.num_units | ||
122 | 96 | |||
123 | 65 | self.env.deploy( | 97 | self.env.deploy( |
124 | 66 | svc.name, | 98 | svc.name, |
125 | 67 | charm.charm_url, | 99 | charm.charm_url, |
126 | 68 | self.deployment.repo_path, | 100 | self.deployment.repo_path, |
127 | 69 | svc.config, | 101 | svc.config, |
128 | 70 | svc.constraints, | 102 | svc.constraints, |
131 | 71 | svc.num_units, | 103 | num_units, |
132 | 72 | svc.force_machine) | 104 | self.deployment.get_unit_placement(svc, 0, env_status)) |
133 | 73 | 105 | ||
134 | 74 | if svc.expose: | 106 | if svc.expose: |
135 | 75 | self.log.info(" Exposing service %r" % svc.name) | 107 | self.log.info(" Exposing service %r" % svc.name) |
136 | @@ -93,8 +125,8 @@ | |||
137 | 93 | self.env.add_relation(end_a, end_b) | 125 | self.env.add_relation(end_a, end_b) |
138 | 94 | created = True | 126 | created = True |
139 | 95 | # per the original, not sure the use case. | 127 | # per the original, not sure the use case. |
142 | 96 | self.log.debug(" Waiting 5s before next relation") | 128 | #self.log.debug(" Waiting 5s before next relation") |
143 | 97 | time.sleep(5) | 129 | #time.sleep(5) |
144 | 98 | return created | 130 | return created |
145 | 99 | 131 | ||
146 | 100 | def _rel_exists(self, status, end_a, end_b): | 132 | def _rel_exists(self, status, end_a, end_b): |
147 | @@ -149,17 +181,17 @@ | |||
148 | 149 | # to be consistent to subsequent watch api interactions, see | 181 | # to be consistent to subsequent watch api interactions, see |
149 | 150 | # http://pad.lv/1203105 which will obviate this wait. | 182 | # http://pad.lv/1203105 which will obviate this wait. |
150 | 151 | time.sleep(5.1) | 183 | time.sleep(5.1) |
151 | 184 | self.add_units() | ||
152 | 152 | 185 | ||
153 | 186 | self.log.debug("Waiting for units before adding relations") | ||
154 | 153 | self.wait_for_units() | 187 | self.wait_for_units() |
155 | 154 | self.add_units() | ||
156 | 155 | 188 | ||
157 | 156 | rels_created = self.add_relations() | 189 | rels_created = self.add_relations() |
158 | 157 | 190 | ||
159 | 158 | # Wait for the units to be up before waiting for rel stability. | 191 | # Wait for the units to be up before waiting for rel stability. |
160 | 159 | self.log.debug("Waiting for units to be started") | ||
161 | 160 | self.wait_for_units(self.options.retry_count) | ||
162 | 161 | if rels_created: | 192 | if rels_created: |
164 | 162 | self.log.debug("Waiting for relations %d", self.options.rel_wait) | 193 | self.log.debug( |
165 | 194 | "Waiting for relation convergence %ds", self.options.rel_wait) | ||
166 | 163 | time.sleep(self.options.rel_wait) | 195 | time.sleep(self.options.rel_wait) |
167 | 164 | self.wait_for_units(self.options.retry_count) | 196 | self.wait_for_units(self.options.retry_count) |
168 | 165 | 197 | ||
169 | 166 | 198 | ||
170 | === modified file 'deployer/deployment.py' | |||
171 | --- deployer/deployment.py 2013-07-24 23:10:15 +0000 | |||
172 | +++ deployer/deployment.py 2013-11-01 15:46:48 +0000 | |||
173 | @@ -40,8 +40,74 @@ | |||
174 | 40 | return Service(name, self.data['services'][name]) | 40 | return Service(name, self.data['services'][name]) |
175 | 41 | 41 | ||
176 | 42 | def get_services(self): | 42 | def get_services(self): |
177 | 43 | services = [] | ||
178 | 43 | for name, svc_data in self.data.get('services', {}).items(): | 44 | for name, svc_data in self.data.get('services', {}).items(): |
180 | 44 | yield Service(name, svc_data) | 45 | services.append(Service(name, svc_data)) |
181 | 46 | services.sort(self._placement_sort) | ||
182 | 47 | return services | ||
183 | 48 | |||
184 | 49 | @staticmethod | ||
185 | 50 | def _placement_sort(svc_a, svc_b): | ||
186 | 51 | if svc_a.unit_placement: | ||
187 | 52 | if svc_b.unit_placement: | ||
188 | 53 | return cmp(svc_a.name, svc_b.name) | ||
189 | 54 | return 1 | ||
190 | 55 | if svc_b.unit_placement: | ||
191 | 56 | return -1 | ||
192 | 57 | return cmp(svc_a.name, svc_b.name) | ||
193 | 58 | |||
194 | 59 | @staticmethod | ||
195 | 60 | def _format_placement(machine, container=None): | ||
196 | 61 | if container: | ||
197 | 62 | return "%s:%s" % (container, machine) | ||
198 | 63 | else: | ||
199 | 64 | return machine | ||
200 | 65 | |||
201 | 66 | def get_unit_placement(self, svc, unit_number, status): | ||
202 | 67 | unit_mapping = svc.unit_placement | ||
203 | 68 | if not unit_mapping: | ||
204 | 69 | return None | ||
205 | 70 | if len(unit_mapping) <= unit_number: | ||
206 | 71 | return None | ||
207 | 72 | |||
208 | 73 | unit_placement = placement = str(unit_mapping[unit_number]) | ||
209 | 74 | container = None | ||
210 | 75 | u_idx = unit_number | ||
211 | 76 | |||
212 | 77 | if ':' in unit_placement: | ||
213 | 78 | container, placement = unit_placement.split(":") | ||
214 | 79 | if '=' in placement: | ||
215 | 80 | placement, u_idx = placement.split("=") | ||
216 | 81 | |||
217 | 82 | if placement.isdigit() and placement == "0": | ||
218 | 83 | return self._format_placement(placement, container) | ||
219 | 84 | |||
220 | 85 | with_service = status['services'].get(placement) | ||
221 | 86 | if with_service is None: | ||
222 | 87 | # Should be caught in validate relations but sanity check | ||
223 | 88 | # for concurrency. | ||
224 | 89 | self.log.error( | ||
225 | 90 | "Service %s to be deployed with non existant service %s", | ||
226 | 91 | svc.name, placement) | ||
227 | 92 | # Prefer continuing deployment with a new machine rather | ||
228 | 93 | # than an in-progress abort. | ||
229 | 94 | return None | ||
230 | 95 | |||
231 | 96 | svc_units = with_service['units'] | ||
232 | 97 | if len(svc_units) <= unit_number: | ||
233 | 98 | self.log.warning( | ||
234 | 99 | "Service:%s deploy-with Service:%s, but no with unit found", | ||
235 | 100 | svc.name, placement) | ||
236 | 101 | return None | ||
237 | 102 | unit_names = svc_units.keys() | ||
238 | 103 | unit_names.sort() | ||
239 | 104 | machine = svc_units[unit_names[int(u_idx)]].get('machine') | ||
240 | 105 | if not machine: | ||
241 | 106 | self.log.warning( | ||
242 | 107 | "Service:%s deploy-with unit missing machine %s", | ||
243 | 108 | svc.name, unit_names[unit_number]) | ||
244 | 109 | return None | ||
245 | 110 | return self._format_placement(machine, container) | ||
246 | 45 | 111 | ||
247 | 46 | def get_relations(self): | 112 | def get_relations(self): |
248 | 47 | if 'relations' not in self.data: | 113 | if 'relations' not in self.data: |
249 | @@ -117,6 +183,7 @@ | |||
250 | 117 | self.load_overrides(cli_overides) | 183 | self.load_overrides(cli_overides) |
251 | 118 | self.resolve_config() | 184 | self.resolve_config() |
252 | 119 | self.validate_relations() | 185 | self.validate_relations() |
253 | 186 | self.validate_placement() | ||
254 | 120 | 187 | ||
255 | 121 | def load_overrides(self, cli_overrides=()): | 188 | def load_overrides(self, cli_overrides=()): |
256 | 122 | """Load overrides.""" | 189 | """Load overrides.""" |
257 | @@ -195,6 +262,49 @@ | |||
258 | 195 | ep.service, "%s <-> %s" % (e_a, e_b)) | 262 | ep.service, "%s <-> %s" % (e_a, e_b)) |
259 | 196 | raise ErrorExit() | 263 | raise ErrorExit() |
260 | 197 | 264 | ||
261 | 265 | def validate_placement(self): | ||
262 | 266 | services = dict([(s.name, s) for s in self.get_services()]) | ||
263 | 267 | for name, s in services.items(): | ||
264 | 268 | unit_placement = s.unit_placement | ||
265 | 269 | if unit_placement is None: | ||
266 | 270 | continue | ||
267 | 271 | if not isinstance(unit_placement, list): | ||
268 | 272 | unit_placement = [unit_placement] | ||
269 | 273 | unit_placement = map(str, unit_placement) | ||
270 | 274 | for idx, p in enumerate(unit_placement): | ||
271 | 275 | if ':' in p: | ||
272 | 276 | container, p = p.split(':') | ||
273 | 277 | if container not in ('lxc', 'kvm'): | ||
274 | 278 | self.log.error( | ||
275 | 279 | "Invalid service:%s placement: %s", | ||
276 | 280 | name, unit_placement[idx]) | ||
277 | 281 | raise ErrorExit() | ||
278 | 282 | if '=' in p: | ||
279 | 283 | p, u_idx = p.split("=") | ||
280 | 284 | if not u_idx.isdigit(): | ||
281 | 285 | self.log.error( | ||
282 | 286 | "Invalid service:%s placement: %s", | ||
283 | 287 | name, unit_placement[idx]) | ||
284 | 288 | raise ErrorExit() | ||
285 | 289 | if p.isdigit() and p == '0': | ||
286 | 290 | continue | ||
287 | 291 | elif p.isdigit(): | ||
288 | 292 | self.log.error( | ||
289 | 293 | "Service placement to machine not supported %s to %s", | ||
290 | 294 | name, unit_placement[idx]) | ||
291 | 295 | raise ErrorExit() | ||
292 | 296 | elif p in services: | ||
293 | 297 | if services[p].unit_placement: | ||
294 | 298 | self.log.error( | ||
295 | 299 | "Nested placement not supported %s -> %s -> %s" % ( | ||
296 | 300 | name, p, services[p].unit_placement)) | ||
297 | 301 | raise ErrorExit() | ||
298 | 302 | else: | ||
299 | 303 | self.log.error( | ||
300 | 304 | "Invalid service placement %s to %s" % ( | ||
301 | 305 | name, unit_placement[idx])) | ||
302 | 306 | raise ErrorExit() | ||
303 | 307 | |||
304 | 198 | def save(self, path): | 308 | def save(self, path): |
305 | 199 | with open(path, "w") as fh: | 309 | with open(path, "w") as fh: |
306 | 200 | fh.write(yaml_dump(self.data)) | 310 | fh.write(yaml_dump(self.data)) |
307 | 201 | 311 | ||
308 | === modified file 'deployer/env/base.py' | |||
309 | --- deployer/env/base.py 2013-10-17 03:16:24 +0000 | |||
310 | +++ deployer/env/base.py 2013-11-01 15:46:48 +0000 | |||
311 | @@ -152,3 +152,6 @@ | |||
312 | 152 | stderr=fh) | 152 | stderr=fh) |
313 | 153 | status = yaml_load(output) | 153 | status = yaml_load(output) |
314 | 154 | return status | 154 | return status |
315 | 155 | |||
316 | 156 | def add_unit(self, service_name, machine_spec): | ||
317 | 157 | raise NotImplementedError() | ||
318 | 155 | 158 | ||
319 | === modified file 'deployer/env/go.py' | |||
320 | --- deployer/env/go.py 2013-10-08 20:13:39 +0000 | |||
321 | +++ deployer/env/go.py 2013-11-01 15:46:48 +0000 | |||
322 | @@ -5,7 +5,11 @@ | |||
323 | 5 | from .base import BaseEnvironment | 5 | from .base import BaseEnvironment |
324 | 6 | from ..utils import ErrorExit | 6 | from ..utils import ErrorExit |
325 | 7 | 7 | ||
327 | 8 | from jujuclient import Environment as EnvironmentClient, UnitErrors, EnvError | 8 | from jujuclient import ( |
328 | 9 | Environment as EnvironmentClient, | ||
329 | 10 | UnitErrors, | ||
330 | 11 | EnvError, | ||
331 | 12 | WatchWrapper) | ||
332 | 9 | 13 | ||
333 | 10 | 14 | ||
334 | 11 | class GoEnvironment(BaseEnvironment): | 15 | class GoEnvironment(BaseEnvironment): |
335 | @@ -20,6 +24,9 @@ | |||
336 | 20 | config = self._get_env_config() | 24 | config = self._get_env_config() |
337 | 21 | return config['admin-secret'] | 25 | return config['admin-secret'] |
338 | 22 | 26 | ||
339 | 27 | def add_unit(self, service_name, machine_spec): | ||
340 | 28 | return self.client.add_unit(service_name, machine_spec) | ||
341 | 29 | |||
342 | 23 | def add_units(self, service_name, num_units): | 30 | def add_units(self, service_name, num_units): |
343 | 24 | return self.client.add_units(service_name, num_units) | 31 | return self.client.add_units(service_name, num_units) |
344 | 25 | 32 | ||
345 | @@ -101,17 +108,50 @@ | |||
346 | 101 | if len(status['machines']) == 1: | 108 | if len(status['machines']) == 1: |
347 | 102 | return | 109 | return |
348 | 103 | 110 | ||
354 | 104 | for mid in status['machines'].keys(): | 111 | # containers before machines, container hosts post wait. |
355 | 105 | if mid == "0": | 112 | machines = status['machines'].keys() |
356 | 106 | continue | 113 | |
357 | 107 | self.log.debug(" Terminating machine %s", mid) | 114 | container_hosts = set() |
358 | 108 | self.terminate_machine(mid) | 115 | containers = set() |
359 | 116 | |||
360 | 117 | def machine_sort(x, y): | ||
361 | 118 | for ctype in ('lxc', 'kvm'): | ||
362 | 119 | for m in (x, y): | ||
363 | 120 | if ctype in m: | ||
364 | 121 | container_hosts.add(m.split('/', 1)[0]) | ||
365 | 122 | containers.add(m) | ||
366 | 123 | if m == x: | ||
367 | 124 | return -1 | ||
368 | 125 | if m == y: | ||
369 | 126 | return 1 | ||
370 | 127 | return cmp(x, y) | ||
371 | 128 | |||
372 | 129 | machines.sort(machine_sort) | ||
373 | 130 | |||
374 | 131 | for mid in machines: | ||
375 | 132 | self._terminate_machine(mid, container_hosts) | ||
376 | 133 | |||
377 | 134 | if containers: | ||
378 | 135 | watch = self.client.get_watch(120) | ||
379 | 136 | WaitForMachineTermination( | ||
380 | 137 | watch, containers).run(self._delta_event_log) | ||
381 | 138 | |||
382 | 139 | for mid in container_hosts: | ||
383 | 140 | self._terminate_machine(mid) | ||
384 | 109 | 141 | ||
385 | 110 | if terminate_wait: | 142 | if terminate_wait: |
386 | 111 | self.log.info(" Waiting for machine termination") | 143 | self.log.info(" Waiting for machine termination") |
387 | 112 | callback = watch and self._delta_event_log or None | 144 | callback = watch and self._delta_event_log or None |
388 | 113 | self.client.wait_for_no_machines(None, callback) | 145 | self.client.wait_for_no_machines(None, callback) |
389 | 114 | 146 | ||
390 | 147 | def _terminate_machine(self, mid, container_hosts=()): | ||
391 | 148 | if mid == "0": | ||
392 | 149 | return | ||
393 | 150 | if mid in container_hosts: | ||
394 | 151 | return | ||
395 | 152 | self.log.debug(" Terminating machine %s", mid) | ||
396 | 153 | self.terminate_machine(mid) | ||
397 | 154 | |||
398 | 115 | def _check_timeout(self, etime): | 155 | def _check_timeout(self, etime): |
399 | 116 | w_timeout = etime - time.time() | 156 | w_timeout = etime - time.time() |
400 | 117 | if w_timeout < 0: | 157 | if w_timeout < 0: |
401 | @@ -211,3 +251,25 @@ | |||
402 | 211 | eps[0]['Relation']['Name'], | 251 | eps[0]['Relation']['Name'], |
403 | 212 | eps[1]['ServiceName'], | 252 | eps[1]['ServiceName'], |
404 | 213 | eps[1]['Relation']['Name']) | 253 | eps[1]['Relation']['Name']) |
405 | 254 | |||
406 | 255 | |||
407 | 256 | class WaitForMachineTermination(WatchWrapper): | ||
408 | 257 | |||
409 | 258 | def __init__(self, watch, machines): | ||
410 | 259 | super(WaitForMachineTermination, self).__init__(watch) | ||
411 | 260 | self.machines = set(machines) | ||
412 | 261 | self.known = set() | ||
413 | 262 | |||
414 | 263 | def process(self, entity_type, change, data): | ||
415 | 264 | if entity_type != 'machine': | ||
416 | 265 | return | ||
417 | 266 | if change == 'remove' and data['Id'] in self.machines: | ||
418 | 267 | self.machines.remove(data['Id']) | ||
419 | 268 | else: | ||
420 | 269 | self.known.add(data['Id']) | ||
421 | 270 | |||
422 | 271 | def complete(self): | ||
423 | 272 | for m in self.machines: | ||
424 | 273 | if m in self.known: | ||
425 | 274 | return False | ||
426 | 275 | return True | ||
427 | 214 | 276 | ||
428 | === modified file 'deployer/service.py' | |||
429 | --- deployer/service.py 2013-09-13 13:36:22 +0000 | |||
430 | +++ deployer/service.py 2013-11-01 15:46:48 +0000 | |||
431 | @@ -4,6 +4,9 @@ | |||
432 | 4 | self.svc_data = svc_data | 4 | self.svc_data = svc_data |
433 | 5 | self.name = name | 5 | self.name = name |
434 | 6 | 6 | ||
435 | 7 | def __repr__(self): | ||
436 | 8 | return "<Service %s>" % (self.name) | ||
437 | 9 | |||
438 | 7 | @property | 10 | @property |
439 | 8 | def config(self): | 11 | def config(self): |
440 | 9 | return self.svc_data.get('options', None) | 12 | return self.svc_data.get('options', None) |
441 | @@ -17,9 +20,14 @@ | |||
442 | 17 | return int(self.svc_data.get('num_units', 1)) | 20 | return int(self.svc_data.get('num_units', 1)) |
443 | 18 | 21 | ||
444 | 19 | @property | 22 | @property |
448 | 20 | def force_machine(self): | 23 | def unit_placement(self): |
449 | 21 | return self.svc_data.get('to') or self.svc_data.get( | 24 | # Separate checks to support machine 0 placement. |
450 | 22 | 'force-machine') | 25 | value = self.svc_data.get('to') |
451 | 26 | if value is None: | ||
452 | 27 | value = self.svc_data.get('force-machine') | ||
453 | 28 | if value is not None and not isinstance(value, list): | ||
454 | 29 | value = [value] | ||
455 | 30 | return value or [] | ||
456 | 23 | 31 | ||
457 | 24 | @property | 32 | @property |
458 | 25 | def expose(self): | 33 | def expose(self): |
459 | 26 | 34 | ||
460 | === modified file 'deployer/tests/test_config.py' | |||
461 | --- deployer/tests/test_config.py 2013-10-10 21:18:38 +0000 | |||
462 | +++ deployer/tests/test_config.py 2013-11-01 15:46:48 +0000 | |||
463 | @@ -54,7 +54,6 @@ | |||
464 | 54 | config.get('my-files-frontend-dev').get_services()] | 54 | config.get('my-files-frontend-dev').get_services()] |
465 | 55 | self.assertTrue(set(wordpress).issubset(set(my_app))) | 55 | self.assertTrue(set(wordpress).issubset(set(my_app))) |
466 | 56 | 56 | ||
467 | 57 | |||
468 | 58 | def test_inherits_config_overridden(self): | 57 | def test_inherits_config_overridden(self): |
469 | 59 | config = ConfigStack([ | 58 | config = ConfigStack([ |
470 | 60 | os.path.join(self.test_data_dir, "stack-default.cfg"), | 59 | os.path.join(self.test_data_dir, "stack-default.cfg"), |
471 | @@ -66,12 +65,12 @@ | |||
472 | 66 | # over-ridden | 65 | # over-ridden |
473 | 67 | self.assertEquals(db.config.get('tuning-level'), 'fastest') | 66 | self.assertEquals(db.config.get('tuning-level'), 'fastest') |
474 | 68 | 67 | ||
475 | 69 | |||
476 | 70 | def test_multi_inheritance_multi_files(self): | 68 | def test_multi_inheritance_multi_files(self): |
477 | 71 | config = ConfigStack([ | 69 | config = ConfigStack([ |
478 | 72 | os.path.join(self.test_data_dir, "openstack", "openstack.cfg"), | 70 | os.path.join(self.test_data_dir, "openstack", "openstack.cfg"), |
479 | 73 | os.path.join(self.test_data_dir, "openstack", "ubuntu_base.cfg"), | 71 | os.path.join(self.test_data_dir, "openstack", "ubuntu_base.cfg"), |
481 | 74 | os.path.join(self.test_data_dir, "openstack", "openstack_base.cfg"), | 72 | os.path.join( |
482 | 73 | self.test_data_dir, "openstack", "openstack_base.cfg"), | ||
483 | 75 | ]) | 74 | ]) |
484 | 76 | self._test_multiple_inheritance(config) | 75 | self._test_multiple_inheritance(config) |
485 | 77 | 76 | ||
486 | @@ -107,7 +106,7 @@ | |||
487 | 107 | 106 | ||
488 | 108 | deployment = config.get('precise-grizzly') | 107 | deployment = config.get('precise-grizzly') |
489 | 109 | services = [s.name for s in list(deployment.get_services())] | 108 | services = [s.name for s in list(deployment.get_services())] |
491 | 110 | self.assertEquals(['nova-cloud-controller', 'mysql'], services) | 109 | self.assertEquals(['mysql', 'nova-cloud-controller'], services) |
492 | 111 | 110 | ||
493 | 112 | nova = deployment.get_service('nova-cloud-controller') | 111 | nova = deployment.get_service('nova-cloud-controller') |
494 | 113 | self.assertEquals(nova.config['openstack-origin'], | 112 | self.assertEquals(nova.config['openstack-origin'], |
495 | @@ -116,8 +115,8 @@ | |||
496 | 116 | deployment = config.get('precise-grizzly-quantum') | 115 | deployment = config.get('precise-grizzly-quantum') |
497 | 117 | services = [s.name for s in list(deployment.get_services())] | 116 | services = [s.name for s in list(deployment.get_services())] |
498 | 118 | self.assertEquals(services, | 117 | self.assertEquals(services, |
501 | 119 | ['quantum-gateway', 'nova-cloud-controller', | 118 | ['mysql', 'nova-cloud-controller', |
502 | 120 | 'mysql']) | 119 | 'quantum-gateway']) |
503 | 121 | nova = deployment.get_service('nova-cloud-controller') | 120 | nova = deployment.get_service('nova-cloud-controller') |
504 | 122 | self.assertEquals(nova.config['network-manager'], 'Quantum') | 121 | self.assertEquals(nova.config['network-manager'], 'Quantum') |
505 | 123 | self.assertEquals(nova.config['openstack-origin'], | 122 | self.assertEquals(nova.config['openstack-origin'], |
506 | 124 | 123 | ||
507 | === added file 'deployer/tests/test_data/stack-placement-invalid-2.yaml' | |||
508 | --- deployer/tests/test_data/stack-placement-invalid-2.yaml 1970-01-01 00:00:00 +0000 | |||
509 | +++ deployer/tests/test_data/stack-placement-invalid-2.yaml 2013-11-01 15:46:48 +0000 | |||
510 | @@ -0,0 +1,13 @@ | |||
511 | 1 | stack: | ||
512 | 2 | series: precise | ||
513 | 3 | services: | ||
514 | 4 | nova-compute: | ||
515 | 5 | charm: cs:precise/nova-compute | ||
516 | 6 | units: 3 | ||
517 | 7 | ceph: | ||
518 | 8 | units: 3 | ||
519 | 9 | to: [nova-compute, nova-compute, nova-compute] | ||
520 | 10 | mysql: | ||
521 | 11 | to: lxc:nova-compute | ||
522 | 12 | wordpress: | ||
523 | 13 | to: lxc:foobar | ||
524 | 0 | 14 | ||
525 | === added file 'deployer/tests/test_data/stack-placement-invalid.yaml' | |||
526 | --- deployer/tests/test_data/stack-placement-invalid.yaml 1970-01-01 00:00:00 +0000 | |||
527 | +++ deployer/tests/test_data/stack-placement-invalid.yaml 2013-11-01 15:46:48 +0000 | |||
528 | @@ -0,0 +1,13 @@ | |||
529 | 1 | stack: | ||
530 | 2 | series: precise | ||
531 | 3 | services: | ||
532 | 4 | nova-compute: | ||
533 | 5 | charm: cs:precise/nova-compute | ||
534 | 6 | units: 3 | ||
535 | 7 | ceph: | ||
536 | 8 | units: 3 | ||
537 | 9 | to: [nova-compute, nova-compute, nova-compute] | ||
538 | 10 | mysql: | ||
539 | 11 | to: lxc:nova-compute | ||
540 | 12 | wordpress: | ||
541 | 13 | to: lxc:mysql | ||
542 | 0 | 14 | ||
543 | === added file 'deployer/tests/test_data/stack-placement.yaml' | |||
544 | --- deployer/tests/test_data/stack-placement.yaml 1970-01-01 00:00:00 +0000 | |||
545 | +++ deployer/tests/test_data/stack-placement.yaml 2013-11-01 15:46:48 +0000 | |||
546 | @@ -0,0 +1,18 @@ | |||
547 | 1 | stack: | ||
548 | 2 | series: precise | ||
549 | 3 | services: | ||
550 | 4 | nova-compute: | ||
551 | 5 | charm: cs:precise/nova-compute | ||
552 | 6 | units: 3 | ||
553 | 7 | ceph: | ||
554 | 8 | units: 3 | ||
555 | 9 | to: [nova-compute, nova-compute] | ||
556 | 10 | mysql: | ||
557 | 11 | to: 0 | ||
558 | 12 | quantum: | ||
559 | 13 | units: 4 | ||
560 | 14 | to: ["lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute"] | ||
561 | 15 | verity: | ||
562 | 16 | to: lxc:nova-compute=2 | ||
563 | 17 | semper: | ||
564 | 18 | to: nova-compute=2 | ||
565 | 0 | 19 | ||
566 | === modified file 'deployer/tests/test_deployment.py' | |||
567 | --- deployer/tests/test_deployment.py 2013-07-24 23:10:15 +0000 | |||
568 | +++ deployer/tests/test_deployment.py 2013-11-01 15:46:48 +0000 | |||
569 | @@ -5,7 +5,7 @@ | |||
570 | 5 | 5 | ||
571 | 6 | from deployer.config import ConfigStack | 6 | from deployer.config import ConfigStack |
572 | 7 | from deployer.deployment import Deployment | 7 | from deployer.deployment import Deployment |
574 | 8 | from deployer.utils import setup_logging | 8 | from deployer.utils import setup_logging, ErrorExit |
575 | 9 | 9 | ||
576 | 10 | from .base import Base | 10 | from .base import Base |
577 | 11 | 11 | ||
578 | @@ -16,6 +16,11 @@ | |||
579 | 16 | self.output = setup_logging( | 16 | self.output = setup_logging( |
580 | 17 | debug=True, verbose=True, stream=StringIO.StringIO()) | 17 | debug=True, verbose=True, stream=StringIO.StringIO()) |
581 | 18 | 18 | ||
582 | 19 | def get_named_deployment(self, file_name, stack_name): | ||
583 | 20 | return ConfigStack( | ||
584 | 21 | [os.path.join( | ||
585 | 22 | self.test_data_dir, file_name)]).get(stack_name) | ||
586 | 23 | |||
587 | 19 | def test_deployer(self): | 24 | def test_deployer(self): |
588 | 20 | d = ConfigStack( | 25 | d = ConfigStack( |
589 | 21 | [os.path.join( | 26 | [os.path.join( |
590 | @@ -51,6 +56,66 @@ | |||
591 | 51 | list(d.get_relations()), | 56 | list(d.get_relations()), |
592 | 52 | [('blog', 'db'), ('blog', 'cache'), ('blog', 'haproxy')]) | 57 | [('blog', 'db'), ('blog', 'cache'), ('blog', 'haproxy')]) |
593 | 53 | 58 | ||
594 | 59 | def test_validate_placement_sorting(self): | ||
595 | 60 | d = self.get_named_deployment("stack-placement.yaml", "stack") | ||
596 | 61 | services = d.get_services() | ||
597 | 62 | self.assertEqual(services[0].name, 'nova-compute') | ||
598 | 63 | try: | ||
599 | 64 | d.validate_placement() | ||
600 | 65 | except ErrorExit: | ||
601 | 66 | self.fail("Should not fail") | ||
602 | 67 | |||
603 | 68 | def test_validate_invalid_placement_nested(self): | ||
604 | 69 | d = self.get_named_deployment("stack-placement-invalid.yaml", "stack") | ||
605 | 70 | services = d.get_services() | ||
606 | 71 | self.assertEqual(services[0].name, 'nova-compute') | ||
607 | 72 | try: | ||
608 | 73 | d.validate_placement() | ||
609 | 74 | except ErrorExit: | ||
610 | 75 | pass | ||
611 | 76 | else: | ||
612 | 77 | self.fail("Should fail") | ||
613 | 78 | |||
614 | 79 | def test_validate_invalid_placement_no_with_service(self): | ||
615 | 80 | d = self.get_named_deployment( | ||
616 | 81 | "stack-placement-invalid-2.yaml", "stack") | ||
617 | 82 | services = d.get_services() | ||
618 | 83 | self.assertEqual(services[0].name, 'nova-compute') | ||
619 | 84 | try: | ||
620 | 85 | d.validate_placement() | ||
621 | 86 | except ErrorExit: | ||
622 | 87 | pass | ||
623 | 88 | else: | ||
624 | 89 | self.fail("Should fail") | ||
625 | 90 | |||
626 | 91 | def test_get_unit_placement(self): | ||
627 | 92 | d = self.get_named_deployment("stack-placement.yaml", "stack") | ||
628 | 93 | status = { | ||
629 | 94 | 'services': { | ||
630 | 95 | 'nova-compute': { | ||
631 | 96 | 'units': { | ||
632 | 97 | 'nova-compute/2': {'machine': '1'}, | ||
633 | 98 | 'nova-compute/3': {'machine': '2'}, | ||
634 | 99 | 'nova-compute/4': {'machine': '3'}}}}} | ||
635 | 100 | svc = d.get_service('ceph') | ||
636 | 101 | self.assertEqual(d.get_unit_placement(svc, 0, status), '1') | ||
637 | 102 | self.assertEqual(d.get_unit_placement(svc, 1, status), '2') | ||
638 | 103 | self.assertEqual(d.get_unit_placement(svc, 2, status), None) | ||
639 | 104 | |||
640 | 105 | svc = d.get_service('quantum') | ||
641 | 106 | self.assertEqual(d.get_unit_placement(svc, 0, status), 'lxc:1') | ||
642 | 107 | self.assertEqual(d.get_unit_placement(svc, 2, status), 'lxc:3') | ||
643 | 108 | self.assertEqual(d.get_unit_placement(svc, 3, status), None) | ||
644 | 109 | |||
645 | 110 | svc = d.get_service('verity') | ||
646 | 111 | self.assertEqual(d.get_unit_placement(svc, 0, status), 'lxc:3') | ||
647 | 112 | |||
648 | 113 | svc = d.get_service('mysql') | ||
649 | 114 | self.assertEqual(d.get_unit_placement(svc, 0, status), '0') | ||
650 | 115 | |||
651 | 116 | svc = d.get_service('semper') | ||
652 | 117 | self.assertEqual(d.get_unit_placement(svc, 0, status), '3') | ||
653 | 118 | |||
654 | 54 | def test_multiple_relations_no_weight(self): | 119 | def test_multiple_relations_no_weight(self): |
655 | 55 | data = {"relations": {"wordpress": {"consumes": ["mysql"]}, | 120 | data = {"relations": {"wordpress": {"consumes": ["mysql"]}, |
656 | 56 | "nginx": {"consumes": ["wordpress"]}}} | 121 | "nginx": {"consumes": ["wordpress"]}}} |
657 | 57 | 122 | ||
658 | === modified file 'doc/config.rst' | |||
659 | --- doc/config.rst 2013-05-16 03:20:42 +0000 | |||
660 | +++ doc/config.rst 2013-11-01 15:46:48 +0000 | |||
661 | @@ -94,3 +94,57 @@ | |||
662 | 94 | constraints: mem=16 | 94 | constraints: mem=16 |
663 | 95 | options: | 95 | options: |
664 | 96 | tuning: optimized | 96 | tuning: optimized |
665 | 97 | |||
666 | 98 | |||
667 | 99 | Placement | ||
668 | 100 | ========= | ||
669 | 101 | |||
670 | 102 | Flexible unit placement can be specified via deployer. Primarily this is via | ||
671 | 103 | specifying service units deployments alongside those of another service. | ||
672 | 104 | |||
673 | 105 | Each unit's placement must be specified individually, absence of placement for | ||
674 | 106 | a unit results in juju default behavior for the given constraints. | ||
675 | 107 | |||
676 | 108 | One special placement form is machine placement, which is only allowed to machine 0, | ||
677 | 109 | as other machine identities are ambiguous for most usage scenarios. | ||
678 | 110 | |||
679 | 111 | Both container and hulk-smash placement are supported. Different | ||
680 | 112 | containerization and deploy-with services can be mixed. | ||
681 | 113 | |||
682 | 114 | The deployed-with service must have enough units to hold the placed units. | ||
683 | 115 | Nested to: specifications are not supported (i.e. in the example below, | ||
684 | 116 | wordpress can't be deployed to mysql because mysql already specifies a 'to' | ||
685 | 117 | placement). | ||
686 | 118 | |||
687 | 119 | Example:: | ||
688 | 120 | envExport: | ||
689 | 121 | services: | ||
690 | 122 | mysql: | ||
691 | 123 | # The only machine id supported is machine 0 | ||
692 | 124 | to: 0 | ||
693 | 125 | wordpress: | ||
694 | 126 | units: 3 | ||
695 | 127 | redis-server: | ||
696 | 128 | units: 3 | ||
697 | 129 | to: [lxc:wordpress, wordpress] | ||
698 | 130 | ceph: | ||
699 | 131 | units: 4 | ||
700 | 132 | to: [wordpress, wordpress, wordpress, wordpress] | ||
701 | 133 | serenade: | ||
702 | 134 | to: lxc:wordpress=2 | ||
703 | 135 | |||
704 | 136 | In this case the first unit of redis-server is deployed in a container | ||
705 | 137 | on wordpress/0. The second unit of redis-server is deployed hulk smash | ||
706 | 138 | onto wordpress/1. The third unit of redis-server gets a full machine | ||
707 | 139 | allocated to itself. | ||
708 | 140 | |||
709 | 141 | For ceph, we deploy hulk smash of the first 3 units, the final unit doesn't | ||
710 | 142 | have a corresponding unit of wordpress and is deployed (along with a console | ||
711 | 143 | warning) to a separate machine per its default constraints. | ||
712 | 144 | |||
713 | 145 | The serenade service is overriding the default deploy-with unit by | ||
714 | 146 | explicitly specifying a unit index for the deployment. These are not | ||
715 | 147 | unit id based but rather zero based offsets into a sorted list of | ||
716 | 148 | units. | ||
717 | 149 | |||
718 | 150 | |||
719 | 97 | 151 | ||
720 | === modified file 'setup.py' | |||
721 | --- setup.py 2013-10-11 17:01:16 +0000 | |||
722 | +++ setup.py 2013-11-01 15:46:48 +0000 | |||
723 | @@ -12,7 +12,7 @@ | |||
724 | 12 | author="Kapil Thangavelu", | 12 | author="Kapil Thangavelu", |
725 | 13 | author_email="kapil.foss@gmail.com", | 13 | author_email="kapil.foss@gmail.com", |
726 | 14 | url="http://launchpad.net/juju-deployer", | 14 | url="http://launchpad.net/juju-deployer", |
728 | 15 | install_requires=["jujuclient >= 0.0.7"], | 15 | install_requires=["jujuclient >= 0.13"], |
729 | 16 | packages=find_packages(), | 16 | packages=find_packages(), |
730 | 17 | classifiers=[ | 17 | classifiers=[ |
731 | 18 | "Development Status :: 2 - Pre-Alpha", | 18 | "Development Status :: 2 - Pre-Alpha", |
Testing this now, couple of issues so far:
If a service is configured as:
fooservice:
to: [0]
deployer blows up:
70, in get_unit_placement
if ':' in unit_placement:
TypeError: argument of type 'int' is not iterable
Only running that code if unit_placement is a string seems to fix that.
Adding units fails:
2013-10-31 17:34:26 [INFO] deployer.import: Adding 2 more units to nova-compute bin/juju- deployer" , line 9, in <module> entry_point( 'juju-deployer= =0.2.8' , 'console_scripts', 'juju-deployer')() lib/python2. 7/dist- packages/ juju_deployer- 0.2.8-py2. 7.egg/deployer/ cli.py" , line 118, in main lib/python2. 7/dist- packages/ juju_deployer- 0.2.8-py2. 7.egg/deployer/ cli.py" , line 209, in run Importer( env, deployment, options).run() lib/python2. 7/dist- packages/ juju_deployer- 0.2.8-py2. 7.egg/deployer/ action/ importer. py", line 186, in run add_units( ) lib/python2. 7/dist- packages/ juju_deployer- 0.2.8-py2. 7.egg/deployer/ action/ importer. py", line 54, in add_units env.add_ unit(svc. name, mspec) lib/python2. 7/dist- packages/ juju_deployer- 0.2.8-py2. 7.egg/deployer/ env/go. py", line 28, in add_unit add_unit( service_ name, machine_spec) lib/python2. 7/dist- packages/ jujuclient- 0.12-py2. 7.egg/jujuclien t.py", line 465, in add_unit lib/python2. 7/dist- packages/ jujuclient- 0.12-py2. 7.egg/jujuclien t.py", line 135, in _rpc EnvError: <Env Error - Details:
Traceback (most recent call last):
File "/usr/local/
load_
File "/usr/local/
run()
File "/usr/local/
importer.
File "/usr/local/
self.
File "/usr/local/
self.
File "/usr/local/
return self.client.
File "/usr/local/
"Params": params})
File "/usr/local/
raise EnvError(result)
jujuclient.
{ u'Error': u'must add at least one unit', u'RequestId': 1, u'Response': { }}