Merge lp:~sidnei/juju-deployer/build-command into lp:~gandelman-a/juju-deployer/trunk
- build-command
- Merge into trunk
Proposed by
Sidnei da Silva
Status: Merged
Merged at revision: | 68 |
Proposed branch: | lp:~sidnei/juju-deployer/build-command |
Merge into: | lp:~gandelman-a/juju-deployer/trunk |
Diff against target: 740 lines (+192/-109), 2 files modified: deployer.py (+76/-52), utils.py (+116/-57)
To merge this branch: | bzr merge lp:~sidnei/juju-deployer/build-command |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Adam Gandelman | Pending | ||
Review via email: mp+151035@code.launchpad.net |
Commit message
- Add minimal support for a 'build' step after fetching the charm source
- Resolve includes as relative to the config files provided on command line if they are relative paths
- General code cleanup to conform to pep8
Description of the change
- Add minimal support for a 'build' step after fetching the charm source
- Resolve includes as relative to the config files provided on command line if they are relative paths
- General code cleanup to conform to pep8
To post a comment you must log in.
- 67. By Sidnei da Silva — Merge from trunk, resolve conflicts
- 68. By Sidnei da Silva — Fix bad merge
- 69. By Sidnei da Silva — Also set non-include options.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'deployer.py' |
2 | --- deployer.py 2013-03-01 18:09:35 +0000 |
3 | +++ deployer.py 2013-03-04 19:16:20 +0000 |
4 | @@ -4,89 +4,96 @@ |
5 | import subprocess |
6 | import time |
7 | import os |
8 | -import sys |
9 | import yaml |
10 | import json |
11 | import optparse |
12 | import pprint |
13 | import signal |
14 | import __builtin__ |
15 | + |
16 | +from os.path import dirname, abspath |
17 | from pdb import * |
18 | - |
19 | from utils import * |
20 | |
21 | + |
22 | def timed_out(signal, frame): |
23 | log.error("Deployment timed out after %s sec.", opts.timeout) |
24 | exit(1) |
25 | |
26 | -start_time=time.time() |
27 | +start_time = time.time() |
28 | |
29 | parser = optparse.OptionParser() |
30 | parser.add_option('-c', '--config', |
31 | - help='File containing deployment(s) json config. This option can be repeated, with later files overriding values in earlier ones.', |
32 | + help=('File containing deployment(s) json config. This ' |
33 | + 'option can be repeated, with later files overriding ' |
34 | + 'values in earlier ones.'), |
35 | dest='configs', action='append') |
36 | -parser.add_option('-d', '--debug', help='Enable debugging to stdout', dest="debug", |
37 | +parser.add_option('-d', '--debug', help='Enable debugging to stdout', |
38 | + dest="debug", |
39 | action="store_true", default=False) |
40 | parser.add_option('-L', '--local-mods', |
41 | help='Allow deployment of locally-modified charms', |
42 | dest="no_local_mods", default=True, action='store_false') |
43 | -parser.add_option('-u', '--update-charms', help='Update existing charm branches', |
44 | +parser.add_option('-u', '--update-charms', |
45 | + help='Update existing charm branches', |
46 | dest="update_charms", default=False, action="store_true") |
47 | parser.add_option('-l', '--ls', help='List available deployments', |
48 | - dest="list_deploys", action="store_true", default=False ) |
49 | + dest="list_deploys", action="store_true", default=False) |
50 | parser.add_option('-D', '--destroy-services', |
51 | help='Destroy all services (do not terminate machines)', |
52 | - dest="destroy_services", action="store_true", default="False") |
53 | + dest="destroy_services", action="store_true", |
54 | + default="False") |
55 | parser.add_option('-S', '--scrub-zk', action='store_true', default=False, |
56 | - dest='scrub_zk', |
57 | - help='Remove charm nodes from ZK tree after service destroy.') |
58 | + dest='scrub_zk', |
59 | + help='Remove charm nodes from ZK after service destroy.') |
60 | parser.add_option('-T', '--terminate-machines', |
61 | - help='Terminate all machines but the bootstrap node. Destroy'\ |
62 | - 'any services that exist on each', |
63 | - dest="terminate_machines", action="store_true", default="False") |
64 | + help=('Terminate all machines but the bootstrap node. ' |
65 | + 'Destroy any services that exist on each'), |
66 | + dest="terminate_machines", action="store_true", |
67 | + default="False") |
68 | parser.add_option('-t', '--timeout', |
69 | help='Timeout (sec) for entire deployment (45min default)', |
70 | dest='timeout', action='store', type='int', default=2700) |
71 | parser.add_option("-f", '--find-service', action="store", type="string", |
72 | - help='Return hostname of the first unit of a specific service.', |
73 | + help='Find hostname from first unit of a specific service.', |
74 | dest="find_service") |
75 | parser.add_option("-m", '--max-concurrent', action="store", type="int", |
76 | - help="Maximum number of concurrent deployments to send to "\ |
77 | - "provider. Default: no limit", |
78 | + help=("Maximum number of concurrent deployments to send " |
79 | + " to provider. Default: no limit"), |
80 | dest="max_concur_deploy", default=0) |
81 | parser.add_option('-s', '--deploy-delay', action='store', type='float', |
82 | - help="Time in seconds to sleep between 'deploy' commands, "\ |
83 | - "to allow machine provider to process requests. This "\ |
84 | - "delay is also enforced between calls to" |
85 | - "terminate_machine", |
86 | + help=("Time in seconds to sleep between 'deploy' commands, " |
87 | + "to allow machine provider to process requests. This " |
88 | + "delay is also enforced between calls to" |
89 | + "terminate_machine"), |
90 | dest="deploy_delay", default=0) |
91 | parser.add_option('-e', '--environment', action='store', dest='juju_env', |
92 | help='Deploy to a specific Juju environment.', |
93 | default=os.getenv('JUJU_ENV')) |
94 | parser.add_option('-o', '--override', action='append', type='string', |
95 | - help='Override *all* config options of the same name '\ |
96 | - 'across all services. Input as key=value.', |
97 | + help=('Override *all* config options of the same name ' |
98 | + 'across all services. Input as key=value.'), |
99 | dest='overrides', default=None) |
100 | parser.add_option('-w', '--relation-wait', action='store', dest='rel_wait', |
101 | default='60', |
102 | - help='Number of seconds to wait before checking for relation '\ |
103 | - 'errors after all relations have been added and '\ |
104 | - 'subordinates started. (default: 60)') |
105 | + help=('Number of seconds to wait before checking for ' |
106 | + 'relation errors after all relations have been added ' |
107 | + 'and subordinates started. (default: 60)')) |
108 | (opts, args) = parser.parse_args() |
109 | |
110 | if not opts.configs: |
111 | opts.configs = ['deployments.cfg'] |
112 | -update_charms=opts.update_charms |
113 | +update_charms = opts.update_charms |
114 | |
115 | # temporarily abuse __builtin__ till this is setup properly |
116 | __builtin__.juju_log = juju_log = open("juju.log", "w") |
117 | -__builtin__.juju_cmds=[] |
118 | +__builtin__.juju_cmds = [] |
119 | |
120 | init_logging("debug.log", opts.debug) |
121 | |
122 | -ORIGCWD=os.getcwd() |
123 | +ORIGCWD = os.getcwd() |
124 | |
125 | -if opts.destroy_services == True or opts.terminate_machines == True: |
126 | +if opts.destroy_services is True or opts.terminate_machines is True: |
127 | destroy_all(juju_status(opts.juju_env), opts.juju_env, |
128 | terminate_machines=opts.terminate_machines, |
129 | scrub_zk=opts.scrub_zk, |
130 | @@ -94,7 +101,7 @@ |
131 | exit(0) |
132 | |
133 | if opts.find_service is not None: |
134 | - rc=find_service(juju_status(opts.juju_env), opts.find_service) |
135 | + rc = find_service(juju_status(opts.juju_env), opts.find_service) |
136 | exit(rc) |
137 | |
138 | # load the json configuration for possible deployments. |
139 | @@ -105,8 +112,10 @@ |
140 | |
141 | debug_msg("Loading deployments from {}".format(", ".join(opts.configs))) |
142 | cfg = {} |
143 | +include_dirs = [] |
144 | for config in opts.configs: |
145 | - with open(config,'r') as f: |
146 | + include_dirs.append(dirname(abspath(config))) |
147 | + with open(config, 'r') as f: |
148 | try: |
149 | cfg = dict_merge(cfg, json.load(f)) |
150 | except ValueError as exc: |
151 | @@ -125,7 +134,7 @@ |
152 | |
153 | deployment = args[0] |
154 | SERIES, CHARMS, RELATIONS, overrides = load_deployment(cfg, deployment) |
155 | -series_store="%s/%s" % (ORIGCWD, SERIES) |
156 | +series_store = "%s/%s" % (ORIGCWD, SERIES) |
157 | |
158 | # series store ends up being the local juju charm repository |
159 | if not os.path.exists(series_store): |
160 | @@ -139,16 +148,19 @@ |
161 | charm_path = "%s/%s" % (series_store, k) |
162 | debug_msg("Charm '%s' - using charm path '%s'" % (k, charm_path)) |
163 | (branch, sep, revno) = CHARMS[k].get("branch", '').partition('@') |
164 | + needs_build = update_charms |
165 | if branch: |
166 | debug_msg("Branch: {}, revision: {}".format(branch, revno)) |
167 | else: |
168 | debug_msg("No remote branch specified") |
169 | + needs_build = False |
170 | if os.path.exists(charm_path): |
171 | if opts.no_local_mods: |
172 | with cd(charm_path): |
173 | # is there a better way to check for changes? |
174 | - bzrstatus = subprocess.check_output(['bzr','st']).strip() |
175 | - if bzrstatus not in ("","working tree is out of date, run 'bzr update'"): |
176 | + bzrstatus = subprocess.check_output(['bzr', 'st']).strip() |
177 | + if bzrstatus not in ( |
178 | + "", "working tree is out of date, run 'bzr update'"): |
179 | log.error("Charm is locally modified: {}".format( |
180 | charm_path)) |
181 | log.error("Aborting") |
182 | @@ -157,14 +169,15 @@ |
183 | if update_charms and branch: |
184 | debug_msg("Updating charm branch '%s'" % k) |
185 | code = subprocess.call( |
186 | - ["bzr", "pull", "-d", charm_path, '--remember', branch]) |
187 | + ["bzr", "pull", "-d", charm_path, '--remember', branch]) |
188 | if code != 0: |
189 | log.error("Could not update branch at {} from {}".format( |
190 | - charm_path, branch)) |
191 | + charm_path, branch)) |
192 | exit(code) |
193 | elif branch: |
194 | print "- Cloning %s from %s" % (k, branch) |
195 | subprocess.call(["bzr", "branch", branch, charm_path]) |
196 | + needs_build = True |
197 | if revno: |
198 | cmd = ["bzr", "update", charm_path] |
199 | revno != 'tip' and cmd.extend(['-r', revno]) |
200 | @@ -172,6 +185,14 @@ |
201 | if code != 0: |
202 | log.error("Unable to check out branch revision {}".format(revno)) |
203 | exit(code) |
204 | + if CHARMS[k].get("build") is not None and needs_build: |
205 | + cmd = CHARMS[k]["build"] |
206 | + debug_msg("Running build command at {}...".format(charm_path)) |
207 | + with cd(charm_path): |
208 | + code = subprocess.call(cmd) |
209 | + if code != 0: |
210 | + log.error("Failed to build charm {}".format(k)) |
211 | + exit(code) |
212 | # load charms metadata |
213 | if not os.path.isdir(charm_path): |
214 | print "Branch for {} does not exist ({})".format(k, charm_path) |
215 | @@ -208,7 +229,7 @@ |
216 | # create a temporary deploy-time config yaml |
217 | temp = tempfile.NamedTemporaryFile() |
218 | deploy_config = temp.name |
219 | -CONFIG = generate_deployment_config(temp, CHARMS) |
220 | +CONFIG = generate_deployment_config(temp, CHARMS, include_dirs) |
221 | log.debug("Using the following config:\n%s", pprint.pformat(CONFIG)) |
222 | |
223 | # make sure we're bootstrapped |
224 | @@ -216,8 +237,8 @@ |
225 | if status == 1: |
226 | log.error("Is juju bootstrapped?") |
227 | exit(1) |
228 | -if status["machines"][0]["instance-state"] != "provisioned" and \ |
229 | - status["machines"][0]["agent-state"] != "running": |
230 | +if (status["machines"][0]["instance-state"] != "provisioned" and |
231 | + status["machines"][0]["agent-state"] != "running"): |
232 | log.error("Bootstrap node not running?") |
233 | exit(1) |
234 | |
235 | @@ -226,7 +247,7 @@ |
236 | signal.alarm(opts.timeout) |
237 | |
238 | # figure out what needs to be done |
239 | -to_deploy=[] |
240 | +to_deploy = [] |
241 | for c in CHARMS.keys(): |
242 | if c not in status["services"].keys(): |
243 | to_deploy.append(c) |
244 | @@ -241,12 +262,12 @@ |
245 | start_groups = [] |
246 | if to_deploy: |
247 | # go through to_deploy in chunks of group_by |
248 | - start_groups = [to_deploy[i:i+groups_of] |
249 | - for i in range(0, len(to_deploy), groups_of)] |
250 | + start_groups = [to_deploy[i:i + groups_of] |
251 | + for i in range(0, len(to_deploy), groups_of)] |
252 | for group_num in range(0, len(start_groups)): |
253 | for c in start_groups[group_num]: |
254 | - print "- Deploying %s in group %d/%d" % \ |
255 | - (c, group_num+1, len(start_groups)) |
256 | + print ("- Deploying %s in group %d/%d" % |
257 | + (c, group_num + 1, len(start_groups))) |
258 | cmd = "deploy" |
259 | if "units" in CHARMS[c]: |
260 | cmd += " -n %s" % CHARMS[c]["units"] |
261 | @@ -254,16 +275,18 @@ |
262 | cmd += " --constraints=%s" % CHARMS[c]["constraints"] |
263 | if c in CONFIG.keys(): |
264 | cmd += " --config=%s" % deploy_config |
265 | - cmd += " --repository=%s local:%s %s" % (ORIGCWD, |
266 | - CHARMS[c]["metadata"]["name"], c) |
267 | + cmd += " --repository=%s local:%s %s" % ( |
268 | + ORIGCWD, CHARMS[c]["metadata"]["name"], c) |
269 | if opts.juju_env: |
270 | cmd += " -e %s" % opts.juju_env |
271 | juju_call(cmd) |
272 | if opts.deploy_delay > 0: |
273 | - debug_msg("Delaying %s sec. between deployment" % opts.deploy_delay) |
274 | + debug_msg("Delaying %s sec. between deployment" % |
275 | + opts.deploy_delay) |
276 | time.sleep(opts.deploy_delay) |
277 | wait_for_started(opts.debug, opts.juju_env, sleep=3.0, |
278 | - msg="- Waiting for started: %s" % start_groups[group_num]) |
279 | + msg="- Waiting for started: %s" % |
280 | + start_groups[group_num]) |
281 | |
282 | if len(start_groups) != 0: |
283 | status = juju_status(opts.juju_env) |
284 | @@ -280,13 +303,12 @@ |
285 | cmd = "add-unit --num-units %d %s" % (needed_units, c) |
286 | juju_call(cmd) |
287 | else: |
288 | - debug_msg("Service '%s' does not require any more units added." % c) |
289 | + debug_msg("Service '%s' does not need any more units added." % c) |
290 | |
291 | # poll juju status until all services report strated. fail on any error |
292 | wait_for_started(opts.debug, opts.juju_env, |
293 | - "- Waiting for all service units to reach 'started' state.") |
294 | + "- Waiting for all service units to reach 'started' state.") |
295 | |
296 | -time.sleep(1) |
297 | # add all relations, ordered by weight |
298 | if RELATIONS: |
299 | print "- Adding relations:" |
300 | @@ -313,7 +335,9 @@ |
301 | if not ensure_relations_up(juju_status(opts.juju_env)): |
302 | exit(1) |
303 | |
304 | -print "- Deployment complete in %d seconds.\n\n" % (int(time.time() - start_time)) |
305 | +print ("- Deployment complete in %d seconds.\n\n" % |
306 | + (int(time.time() - start_time))) |
307 | + |
308 | print "- Juju command log:" |
309 | for c in __builtin__.juju_cmds: |
310 | print c |
311 | |
312 | === modified file 'utils.py' |
313 | --- utils.py 2013-02-21 02:05:38 +0000 |
314 | +++ utils.py 2013-03-04 19:16:20 +0000 |
315 | @@ -2,10 +2,11 @@ |
316 | import subprocess |
317 | import yaml |
318 | import __builtin__ |
319 | -import pdb |
320 | import sys |
321 | import time |
322 | import os |
323 | + |
324 | +from os.path import isabs, join, exists |
325 | from contextlib import contextmanager |
326 | from copy import deepcopy |
327 | from base64 import b64encode |
328 | @@ -24,10 +25,12 @@ |
329 | finally: |
330 | os.chdir(cwd) |
331 | |
332 | + |
333 | def relations_combine(onto, source): |
334 | target = deepcopy(onto) |
335 | for (key, value) in source.items(): |
336 | - if key in target and isinstance(target[key], dict) and isinstance(value, dict): |
337 | + if (key in target and isinstance(target[key], dict) and |
338 | + isinstance(value, dict)): |
339 | target[key] = relations_combine(target[key], value) |
340 | else: |
341 | if isinstance(target[key], list) and isinstance(value, list): |
342 | @@ -36,10 +39,12 @@ |
343 | target[key] = value |
344 | return target |
345 | |
346 | + |
347 | def dict_merge(onto, source): |
348 | target = deepcopy(onto) |
349 | for (key, value) in source.items(): |
350 | - if key in target and isinstance(target[key], dict) and isinstance(value, dict): |
351 | + if (key in target and isinstance(target[key], dict) and |
352 | + isinstance(value, dict)): |
353 | if key == 'relations': |
354 | target[key] = relations_combine(target[key], value) |
355 | else: |
356 | @@ -65,7 +70,7 @@ |
357 | |
358 | def ensure_started(status): |
359 | """ |
360 | - ensure all service units in an environment are in the 'started' state |
361 | + ensure all service units in an environment are in the 'started' state |
362 | """ |
363 | started = True |
364 | for s in status["services"]: |
365 | @@ -111,8 +116,8 @@ |
366 | if 'subordinates' in u: |
367 | sub = [(u['public-address'], |
368 | subunit, |
369 | - u['subordinates'][subunit]['agent-state']) \ |
370 | - for subunit in u['subordinates']][0] |
371 | + u['subordinates'][subunit]['agent-state']) |
372 | + for subunit in u['subordinates']][0] |
373 | subs_status.append(sub) |
374 | |
375 | started = True |
376 | @@ -126,7 +131,8 @@ |
377 | subs.remove(sub) |
378 | |
379 | if 'error' in state: |
380 | - log.error('Failed subordinate: %s @ %s, state: %s' % (unit, addr, state)) |
381 | + log.error('Failed subordinate: %s @ %s, state: %s' % |
382 | + (unit, addr, state)) |
383 | exit(1) |
384 | |
385 | if state != 'started': |
386 | @@ -137,11 +143,13 @@ |
387 | |
388 | return started |
389 | |
390 | + |
391 | def wait_for_subordinates_started(debug, env): |
392 | if not debug: |
393 | sys.stdout.write('Waiting for subordinate units started') |
394 | sys.stdout.flush() |
395 | - while ensure_subordinates_started(juju_status(env)) == False: |
396 | + |
397 | + while ensure_subordinates_started(juju_status(env)) is False: |
398 | if not debug: |
399 | sys.stdout.write('.') |
400 | sys.stdout.flush() |
401 | @@ -149,6 +157,7 @@ |
402 | if not debug: |
403 | sys.stdout.write('\n') |
404 | |
405 | + |
406 | def ensure_relations_up(status): |
407 | """ ensure all relations are at least 'up' and error free """ |
408 | failed = [] |
409 | @@ -158,7 +167,7 @@ |
410 | for u in status["services"][s]["units"]: |
411 | if 'relation-errors' in status['services'][s]['units'][u]: |
412 | errors = status['services'][s]['units'][u]['relation-errors'] |
413 | - for k,v in errors.iteritems(): |
414 | + for k, v in errors.iteritems(): |
415 | failed.append("%s:%s: %s" % (u, k, v)) |
416 | if failed: |
417 | log.error("Failed relations: %s\n\t", "\n\t".join(failed)) |
418 | @@ -166,38 +175,74 @@ |
419 | |
420 | return True |
421 | |
422 | -def generate_deployment_config(temp, charms): |
423 | + |
424 | +def resolve_include(fname, include_dirs): |
425 | + if isabs(fname): |
426 | + return fname |
427 | + |
428 | + for path in include_dirs: |
429 | + full_path = join(path, fname) |
430 | + if exists(full_path): |
431 | + return full_path |
432 | + |
433 | + return None |
434 | + |
435 | + |
436 | +def generate_deployment_config(temp, charms, include_dirs): |
437 | """ for a given deployment, generate a deploy-time temp. config yaml for |
438 | services that have options specified in deployments.cfg |
439 | """ |
440 | + # When resolving includes, use reverse order of the include_dirs to lookup |
441 | + # relative includes. |
442 | + include_dirs = include_dirs[::-1] |
443 | + |
444 | config = {} |
445 | for c in charms: |
446 | cc = {} |
447 | - if "options" in charms[c].keys(): |
448 | - if "config" not in charms[c].keys(): |
449 | - debug_msg("WARNING: Options passed to %s, but it has" \ |
450 | - "no config.yaml" % (c)) |
451 | + charm_opts = charms[c].get("options") |
452 | + charm_cfg = charms[c].get("config") |
453 | + if charm_opts is None: |
454 | + continue |
455 | + if charm_cfg is None: |
456 | + debug_msg("WARNING: Options passed to %s, but it has" |
457 | + "no config.yaml" % (c)) |
458 | + continue |
459 | + for opt, val in charm_opts.items(): |
460 | + if not opt in charm_cfg.keys(): |
461 | + debug_msg("WARNING: Skipping unknown config " |
462 | + "option to %s: %s" % (c, opt)) |
463 | + continue |
464 | + |
465 | + debug_msg("Adding option to '%s' to deploy config: %s" |
466 | + % (opt, c)) |
467 | + if (isinstance(val, basestring) and |
468 | + (val.startswith("include-file://") |
469 | + or val.startswith("include-base64://"))): |
470 | + include, fname = val.split("://", 1) |
471 | + full_path = resolve_include(fname, include_dirs) |
472 | + if full_path is None: |
473 | + debug_msg("WARNING: Skipping non-existing " |
474 | + "include file %s." % fname) |
475 | + continue |
476 | + else: |
477 | + debug_msg("Using external file: %s for '%s'" |
478 | + % (full_path, opt)) |
479 | + |
480 | + with open(full_path, 'r') as f: |
481 | + cc[opt] = f.read() |
482 | + |
483 | + if include.endswith("base64"): |
484 | + cc[opt] = b64encode(cc[opt]) |
485 | else: |
486 | - for opt, val in charms[c]["options"].items(): |
487 | - if opt in charms[c]["config"].keys(): |
488 | - debug_msg("Adding option to '%s' to deploy config: %s" \ |
489 | - % (opt, c)) |
490 | - if isinstance(val, basestring) and val.startswith("include-file://"): |
491 | - with open(val[15:], 'r') as f: |
492 | - cc[opt] = f.read() |
493 | - elif isinstance(val, basestring) and val.startswith("include-base64://"): |
494 | - with open(val[17:], 'r') as f: |
495 | - cc[opt] = b64encode(f.read()) |
496 | - else: |
497 | - cc[opt] = val |
498 | - else: |
499 | - debug_msg("WARNING: Skipping unknown config option to" \ |
500 | - "%s: %s" % (c, opt)) |
501 | - if cc: |
502 | - config[c] = cc |
503 | + cc[opt] = val |
504 | + |
505 | + if cc: |
506 | + config[c] = cc |
507 | + |
508 | yaml.safe_dump(config, temp) |
509 | return config |
510 | |
511 | + |
512 | def juju_status(juju_env=None): |
513 | """ load yaml output of 'juju status' |
514 | retry failed 'status' calls, which sometimes seems to |
515 | @@ -209,8 +254,8 @@ |
516 | if juju_env: |
517 | cmd = cmd + ['-e', juju_env] |
518 | p = subprocess.Popen(cmd, |
519 | - stdout=subprocess.PIPE, |
520 | - stderr=subprocess.PIPE) |
521 | + stdout=subprocess.PIPE, |
522 | + stderr=subprocess.PIPE) |
523 | p.poll() |
524 | (stdout, stderr) = p.communicate() |
525 | return (stdout, stderr, p.returncode) |
526 | @@ -222,7 +267,7 @@ |
527 | while rc != 0: |
528 | if attempt == max_retries: |
529 | log.error("Could not get Juju status after %s attempts, " |
530 | - " giving up.") |
531 | + " giving up.", attempt) |
532 | sys.exit(1) |
533 | log.error("Call to 'juju status' failed!") |
534 | log.error(stderr) |
535 | @@ -232,6 +277,7 @@ |
536 | y = yaml.load(stdout) |
537 | return y |
538 | |
539 | + |
540 | def juju_call(cmd, retries=5, ignore_failure=False): |
541 | juju_log = __builtin__.juju_log |
542 | |
543 | @@ -254,14 +300,17 @@ |
544 | log.error("Call to '%s', ignoring." % (cmd)) |
545 | return |
546 | if attempt == retries: |
547 | - log.error('Call to "%s" failed %s times, giving up.' % (cmd, retries)) |
548 | + log.error('Call to "%s" failed %s times, giving up.' % |
549 | + (cmd, retries)) |
550 | sys.exit(1) |
551 | attempt += 1 |
552 | - log.error('Call to "%s" failed, %s attempts left.' % (cmd, (retries - attempt))) |
553 | + log.error('Call to "%s" failed, %s attempts left.' % |
554 | + (cmd, (retries - attempt))) |
555 | time.sleep(3) |
556 | rc = _call(cmd, attempt) |
557 | return |
558 | |
559 | + |
560 | def find_service(status, service): |
561 | if service == "bootstrap": |
562 | if "machines" not in status.keys(): |
563 | @@ -278,10 +327,11 @@ |
564 | if len(status["services"][service]["units"]) == 0: |
565 | log.error("No units assigned to %s", service) |
566 | return 1 |
567 | - first_unit=status["services"][service]["units"].keys()[0] |
568 | + first_unit = status["services"][service]["units"].keys()[0] |
569 | print status['services'][service]["units"][first_unit]["public-address"] |
570 | return 0 |
571 | |
572 | + |
573 | def destroy_all(status=None, env=None, terminate_machines=False, |
574 | scrub_zk=False, delay=None): |
575 | """ destroy all services and optionally terminate all machines |
576 | @@ -299,7 +349,7 @@ |
577 | subordinates = [] |
578 | for s in status['services']: |
579 | if ('subordinate' in status['services'][s] and |
580 | - status['services'][s]['subordinate'] == True): |
581 | + status['services'][s]['subordinate'] is True): |
582 | subordinates.append(s) |
583 | else: |
584 | principles.append(s) |
585 | @@ -319,7 +369,7 @@ |
586 | debug_msg('Cleaning cached charms from ZK.') |
587 | scrub_zookeeper(destroyed) |
588 | |
589 | - if len(status["machines"]) > 0 and terminate_machines == True: |
590 | + if len(status["machines"]) > 0 and terminate_machines is True: |
591 | print "- Destroying machines" |
592 | machines = status["machines"].keys() |
593 | for m in machines[1:]: |
594 | @@ -327,9 +377,9 @@ |
595 | juju_call("terminate-machine %s%s" % (m, env_arg)) |
596 | if delay: |
597 | debug_msg("Sleeping for %s between after 'terminate-machine'" |
598 | - % delay) |
599 | + % delay) |
600 | time.sleep(delay) |
601 | - elif len(status["machines"]) == 0 and terminate_machines == True: |
602 | + elif len(status["machines"]) == 0 and terminate_machines is True: |
603 | debug_msg("No machines to destroy") |
604 | |
605 | |
606 | @@ -341,6 +391,7 @@ |
607 | out += " (%s)" % deployments[d]['series'] |
608 | print out |
609 | |
610 | + |
611 | def determine_interface(CHARMS, relation): |
612 | """ finds the appropriate interface for a given relation |
613 | relation = tuple (consumer, provider). Can also return None if a |
614 | @@ -354,17 +405,21 @@ |
615 | return |
616 | for r in consumer["requires"].keys(): |
617 | if r in provider["provides"].keys(): |
618 | - if consumer["requires"][r]["interface"] == provider["provides"][r]["interface"]: |
619 | - debug_msg("Found interface for relation '%s': %s" % (relation, r)) |
620 | + if (consumer["requires"][r]["interface"] == |
621 | + provider["provides"][r]["interface"]): |
622 | + debug_msg("Found interface for relation '%s': %s" |
623 | + % (relation, r)) |
624 | return r, r |
625 | - # Also try to match by interface type if not found by matching interface name |
626 | + # Also try to match by interface type if not found by matching |
627 | + # interface name |
628 | for cr in consumer["requires"].keys(): |
629 | for pr in provider["provides"].keys(): |
630 | - if consumer["requires"][cr]["interface"] == provider["provides"][pr]["interface"]: |
631 | - debug_msg("Found interface for relation '%s': %s" % (relation, cr)) |
632 | + if (consumer["requires"][cr]["interface"] == |
633 | + provider["provides"][pr]["interface"]): |
634 | + debug_msg("Found interface for relation '%s': %s" |
635 | + % (relation, cr)) |
636 | return cr, pr |
637 | |
638 | - |
639 | consumer = CHARMS[relation[0]]["metadata"] |
640 | provider = CHARMS[relation[1]]["metadata"] |
641 | |
642 | @@ -389,6 +444,7 @@ |
643 | return False |
644 | return charm["subordinate"] |
645 | |
646 | + |
647 | def ensure_interface(CHARMS, services, interface): |
648 | """ given a specified interface, ensure they are at least |
649 | listed in the metadata of two charms. """ |
650 | @@ -396,17 +452,18 @@ |
651 | provider = CHARMS[services[1]]["metadata"] |
652 | if interface not in consumer["requires"].keys(): |
653 | log.error("Interface %s not listed as a required interface of %s", |
654 | - services[0]) |
655 | + services[0]) |
656 | return False |
657 | if interface not in provider["providers"].keys(): |
658 | log.error("Interface %s not listed as a provided interface of %s", |
659 | - services[1]) |
660 | + services[1]) |
661 | return False |
662 | return True |
663 | |
664 | + |
665 | def relations_json_to_tuples(relations): |
666 | """ convert deployment.cfg json relation configuration |
667 | - to a list of tuples that describe a relation as (consumer, provider) |
668 | + to a list of tuples that describe a relation as (consumer, provider) |
669 | """ |
670 | t = {} |
671 | for consumer in relations.keys(): |
672 | @@ -415,11 +472,12 @@ |
673 | t[relations[consumer]["weight"]].append((consumer, provider)) |
674 | return t |
675 | |
676 | + |
677 | def wait_for_started(debug, env, msg="Waiting for all units started", sleep=1): |
678 | if not debug: |
679 | sys.stdout.write(msg) |
680 | sys.stdout.flush() |
681 | - while ensure_started(juju_status(env)) == False: |
682 | + while ensure_started(juju_status(env)) is False: |
683 | if not debug: |
684 | sys.stdout.write(".") |
685 | sys.stdout.flush() |
686 | @@ -427,6 +485,7 @@ |
687 | if not debug: |
688 | sys.stdout.write("\n") |
689 | |
690 | + |
691 | def scrub_zookeeper(deleted_services): |
692 | """ remove charm nodes from ZK for destroyed services. useful |
693 | when recycling a juju environment and not having to worry |
694 | @@ -440,7 +499,7 @@ |
695 | from twisted.internet.defer import inlineCallbacks |
696 | from juju.environment.config import EnvironmentsConfig |
697 | from juju.state.service import ServiceStateManager |
698 | - import zookeeper; |
699 | + import zookeeper |
700 | |
701 | env_config = EnvironmentsConfig() |
702 | env_config.load_or_write_sample() |
703 | @@ -453,7 +512,6 @@ |
704 | def _clean_juju_state(): |
705 | zookeeper.set_debug_level(0) |
706 | provider = environment.get_machine_provider() |
707 | - storage = provider.get_file_storage() |
708 | |
709 | client = yield provider.connect() |
710 | charms = yield client.get_children("/charms") |
711 | @@ -482,6 +540,7 @@ |
712 | reactor.callWhenRunning(_clean_juju_state) |
713 | reactor.run() |
714 | |
715 | + |
716 | def load_deployment(config, deployment): |
717 | if deployment not in config.keys(): |
718 | log.error("deployment %s not found.", deployment) |
719 | @@ -499,8 +558,8 @@ |
720 | try: |
721 | cur = config[parent] |
722 | except KeyError: |
723 | - log.error("Could not find parent deployment in config: %s" %\ |
724 | - parent) |
725 | + log.error("Could not find parent deployment in config: %s" % |
726 | + parent) |
727 | exit(1) |
728 | |
729 | base = cur |
730 | @@ -510,8 +569,8 @@ |
731 | deploy_config = reduce(dict_merge, configs) |
732 | |
733 | series = deploy_config.get('series') |
734 | - charms = deploy_config.get('services',{}) |
735 | - overrides = deploy_config.get('overrides',{}) |
736 | + charms = deploy_config.get('services', {}) |
737 | + overrides = deploy_config.get('overrides', {}) |
738 | if 'relations' in deploy_config: |
739 | relations = relations_json_to_tuples(deploy_config["relations"]) |
740 | else: |