Merge ~smigiel-dariusz/cloud-upgrade-planner:pre_integration into cloud-upgrade-planner:master
- Git
- lp:~smigiel-dariusz/cloud-upgrade-planner
- pre_integration
- Merge into master
Proposed by
Dariusz Smigiel
Status: | Merged |
---|---|
Approved by: | James Troup |
Approved revision: | 3a2068a958d16e398e45fcf0a1cde80e16d137b5 |
Merged at revision: | bf1c391b152991cfca9aa6311e471e48302c7f1d |
Proposed branch: | ~smigiel-dariusz/cloud-upgrade-planner:pre_integration |
Merge into: | cloud-upgrade-planner:master |
Diff against target: |
1456 lines (+1412/-0) 5 files modified
cloud_upgrade_planner/common.py (+76/-0) cloud_upgrade_planner/openstack_managed_upgrade.py (+278/-0) cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py (+328/-0) setup.py (+2/-0) tests/test_ubuntu_series_upgrade_for_openstack.py (+728/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
BootStack Reviewers | Pending | ||
BootStack Reviewers | Pending | ||
Review via email: mp+409606@code.launchpad.net |
Commit message
Migration of bootstack-tools to cloud-upgrade-
Description of the change
To post a comment you must log in.
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Revision history for this message
James Troup (elmo) wrote : | # |
I'll merge this, but there are some things that need to be fixed in followups please:
1) assumption of /home/ubuntu
2) reference to Canonical Wiki
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Change successfully merged at revision bf1c391b152991c
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloud_upgrade_planner/cli.py b/cloud_upgrade_planner/cli.py |
2 | old mode 100755 |
3 | new mode 100644 |
4 | diff --git a/cloud_upgrade_planner/common.py b/cloud_upgrade_planner/common.py |
5 | new file mode 100644 |
6 | index 0000000..a31016e |
7 | --- /dev/null |
8 | +++ b/cloud_upgrade_planner/common.py |
9 | @@ -0,0 +1,76 @@ |
10 | +import re |
11 | +import sys |
12 | +import yaml |
13 | + |
14 | + |
def find_units_for_app(app, model_apps):
    """Return the names of all units of *app* as a list."""
    return [unit for unit in model_apps[app]['units']]
17 | + |
18 | + |
def find_leader_for_app(app, model_apps):
    """Return the first unit of *app* flagged as leader, or None if none is."""
    for unit_name, unit_info in model_apps[app]['units'].items():
        if 'leader' in unit_info:
            return unit_name
    return None
23 | + |
24 | + |
def find_charms_apps_in_model(charms, model_apps, svc2charm):
    """Return all application names whose charm is in *charms*.

    Results are ordered by *charms* first, then by application order in
    *model_apps*, matching how svc2charm maps app name -> charm name.
    """
    return [app
            for charm in charms
            for app in model_apps
            if svc2charm[app] == charm]
31 | + |
32 | + |
def get_model_apps(status_file):
    """Load the 'applications' section of a saved ``juju status`` YAML dump.

    Exits the whole program with status 1 when *status_file* cannot be read,
    after telling the operator how to regenerate it.
    """
    try:
        status_fd = open(status_file)
    except IOError:
        print("Please run a fresh copy of juju status --format yaml > jsfy")
        sys.exit(1)
    with status_fd:
        return yaml.safe_load(status_fd)["applications"]
40 | + |
41 | + |
def find_apps_from_charm(charm, model_apps):
    """Return the names of all applications deployed from *charm*.

    Matches charm URLs of the form ``local:`` or ``cs:`` plus an optional
    ``~user/`` or ``series/`` segment, then the charm name and a numeric
    revision (e.g. ``cs:keystone-300``, ``local:bionic/keystone-0``).
    """
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); anchored at the start via match().
    regex = re.compile(r"(local:|cs:)(~*.*/)*{}-\d+".format(charm))
    return [app for app in model_apps.keys()
            if regex.match(model_apps[app]['charm']) is not None]
47 | + |
48 | + |
def find_machine_from_unit(app, unit, model_apps):
    """Return the juju machine id that hosts *unit* of *app*."""
    unit_info = model_apps[app]['units'][unit]
    return unit_info['machine']
51 | + |
52 | + |
def render_app_to_charm_dict(model_apps):
    """Map each application name to the bare name of the charm it runs.

    Match local: or cs: + ~user/charm, series/charm, or charm.
    regex groups are:
      1: store|local
      2: ~namespace or series (may be empty)
      3: charm name (revision suffix stripped)
    """
    pattern = re.compile(r"(local:|cs:)(~*.*/|)(.*)-\d+")
    return {app: pattern.match(info['charm']).group(3)
            for app, info in model_apps.items()}
65 | + |
66 | + |
def available_upgrade(app, model_apps):
    """Return True when juju reports a newer charm revision for *app*.

    Presence of 'can-upgrade-to' in the app's status means an upgrade is
    available; otherwise a note is printed and False is returned.
    """
    app_info = model_apps[app]
    if 'can-upgrade-to' not in app_info:
        print(" # No charm upgrade available for {}. "
              "Running: {}".format(app, app_info["charm"]))
        return False
    return True
73 | + |
74 | + |
def find_hacluster_for_unit(app, unit, model_apps):
    """Return the hacluster subordinate unit attached to *unit*, or None.

    Scans the unit's subordinates and returns the first whose application
    runs the hacluster charm (any namespace/series prefix, any revision).
    """
    unit_info = model_apps[app]['units'][unit]
    if 'subordinates' not in unit_info:
        return None
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); charm name inlined instead of
    # the redundant .format('hacluster').
    regex = re.compile(r"(local:|cs:)(~*.*/)*hacluster-\d+")
    for subunit in unit_info['subordinates'].keys():
        # subordinate unit "hacluster-ks/0" -> application "hacluster-ks"
        subunit_app = subunit.split("/")[0]
        if regex.match(model_apps[subunit_app]['charm']) is not None:
            return subunit
    return None
85 | + |
86 | diff --git a/cloud_upgrade_planner/openstack_managed_upgrade.py b/cloud_upgrade_planner/openstack_managed_upgrade.py |
87 | new file mode 100644 |
88 | index 0000000..4b4960e |
89 | --- /dev/null |
90 | +++ b/cloud_upgrade_planner/openstack_managed_upgrade.py |
91 | @@ -0,0 +1,278 @@ |
92 | +#!/usr/bin/python3 |
93 | + |
94 | +# requires file in current directory named "jsfy" created with |
95 | +# juju status --format yaml > jsfy |
96 | +# (version of juju must support leadership in yaml output) |
97 | + |
98 | +# Assumptions |
99 | +# we assume you're deploying either cs:charmname or cs:~namespace/charmname |
100 | +# or local:<series>/<charm> versions of charms, and does not support |
101 | +# differently-named charms. |
102 | + |
103 | +# set this to the destination openstack-origin of your choice |
104 | +# TODO: |
105 | +# include optional unit pauses for pure clean HA |
106 | +# include evacuating and monitoring each nova compute node |
107 | +# include neutron-gateway HA router migrations if pausing |
108 | +# include gnocchi/aodh/octavia/placement installs |
109 | + |
110 | +import argparse |
111 | +import re |
112 | +import sys |
113 | + |
114 | +from cloud_upgrade_planner import common |
115 | + |
# The phases of the update. After any of these phases, updates can be paused
# Each phase is recommended to take a day, with phases 3 and 4 being combined
PHASE_APPS = {0: ["openstack-service-checks",
                  "percona-cluster",
                  "rabbitmq-server"],
              1: ["keystone",
                  "ceph-mon"],
              2: ["ceph-fs",
                  "ceph-radosgw",
                  "swift-proxy",
                  "swift-storage",
                  "cinder",
                  "cinder-ceph",
                  "cinder-backup",
                  "barbican",
                  "glance",
                  "aodh",
                  "gnocchi",
                  "ceilometer",
                  "designate",
                  "designate-bind",
                  "neutron-api",
                  "nova-cloud-controller",
                  "heat",
                  "manila",
                  "manila-generic",
                  "openstack-dashboard",
                  "placement",
                  "octavia",
                  "easyrsa",
                  "ovn-central"],
              3: ["neutron-gateway",
                  "nova-compute",
                  "neutron-openvswitch",
                  "ovn-chassis",
                  "ceph-osd"]}

# Phases upgraded unit-by-unit via the charm's openstack-upgrade action
ACTION_MANAGED_PHASES = [1, 3]
# Phases where the settle-watch command is printed once at the end of the
# whole phase rather than after every application
DEFERRED_WAIT_PHASES = [2]

# These charms use source: instead of openstack-origin: config flags
ORIGIN_VAR_IS_SOURCE = ["ceph-osd",
                        "ceph-mon",
                        "ceph-radosgw",
                        "percona-cluster",
                        "rabbitmq-server"]

# These charms do not have openstack-origin/source config flags
CHARM_UPGRADES_ONLY = ["neutron-openvswitch",
                       "ovn-chassis",
                       "memcached",
                       "mongodb",
                       "graylog",
                       "prometheus",
                       "openstack-service-checks",
                       "cinder-ceph",
                       "cinder-backup",
                       "easyrsa",
                       "designate-bind"]

# Enable deferred restarts on these charms as they are upgraded
DEFERRED_RESTART_CHARMS = [
    "neutron-gateway",
    "neutron-openvswitch",
    "ovn-central",
    "ovn-chassis",
    "ovn-dedicated-chassis",
    "rabbitmq-server",
]

# TODO: When upgrading to these versions, add step to install these charms
# between Phase 1 and Phase 2
NEW_CHARMS_FOR_VERSIONS = {"train": ["placement"],
                           "queens": ["octavia"],
                           "ocata": ["aodh", "gnocchi"]}

# This can be used both for required --switch changes for 19.04+ or can be
# populated with specific version URLs
# e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be
# switched to cs:~openstack-charmers/cinder-backup
# Anything not called out with these paths will be switched to cs:<charmname>
# if currently running cs:~*/<charmname> or local:<charmname>
CHARMSTORE_TILDE_PATHS = {
    "cinder-backup": "cs:~openstack-charmers/cinder-backup",
    "easyrsa": "cs:~containers/easyrsa",
    "policy-routing": "cs:~canonical-bootstack/policy-routing",
    "infra-node": "cs:~canonical-bootstack/infra-node",
}

# juju workload/agent states that indicate the model has not settled yet;
# WATCH_COMMAND greps for them so the operator can wait between steps
JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing"
WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES)

PRE_UPGRADE_STEPS = """
This playbook assumes all applications are running an identical openstack-origin/source of the prior version openstack.
You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify the first and last apps are at the same level.

### IMPORTANT ###
Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders

Please read the wiki for openstack upgrades for known issues and read the upstream charm guide release notes and issues.
https://wiki.canonical.com/CDO/IS/Bootstack/Playbooks/OpenstackReleaseUpgradeActionManaged
https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html

There may be several things that need to be updated before changing to a new version of openstack, such as
setting keystone tokens to fernet before upgrade to Rocky,
ensuring that the ceph minimum_osd_version is set for the current version of ceph before upgrade,
before upgrading to train, ensure that you've installed octavia and have migrated loadbalancers, as lbaasv2 is deprecated.

See the referenced docs for these steps.

After any upgrades, please check status of services via nagios/thruk to ensure you're not exhibiting new alerts.

This upgrade guide does not cover router or VM migrations to avoid data-plane outages during package upgrades.
Please be sure to plan accordingly based on customer request.

All charms should be at the latest version before upgrading openstack.
"""

POST_UPGRADE_STEPS = """

# POST UPGRADE ACTIONS
Perform these steps after the cloud has completed upgrades

Validate Nagios is clean and any disabled nova-compute services have been re-enabled.

Update ceph minimum_osd_version per docs:
https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html#ceph-option-require-osd-release
"""
244 | + |
245 | + |
def plan_charm_upgrade(app, model_apps, svc2charm):
    """Print the juju commands to bring *app* to the latest charm revision.

    Decides whether an ``upgrade-charm`` needs ``--switch`` (local charm,
    known tilde path, or unrecognized namespaced path), and for charms in
    DEFERRED_RESTART_CHARMS writes a per-app config file enabling deferred
    restarts that is passed along with the upgrade (or applied via
    ``juju config`` when no upgrade is needed).

    Side effect: may create ``deferred_restart_config_<app>.yaml`` in the
    current directory.
    """
    switch_args, config_filename = "", ""
    charm = svc2charm[app]
    current_charm = model_apps[app]['charm']
    # strip version number (eg: cs:~foo/my-charm-123 becomes cs:~foo/my-charm)
    current_charm_path = current_charm.rsplit("-", 1)[0]
    target_tilde = CHARMSTORE_TILDE_PATHS.get(charm, None)

    if "local:" in current_charm_path and not target_tilde:
        # Provide warning to user that local charm was found and needs sanity check
        print("""
    WARNING!!! Application {app} has local charm path {current_charm}
    Suggesting switch to latest promulgated cs: version.
    Please ensure updates in local charm are in upstream charm before
    running upgrade-charm on this application.
    """.format(app=app, current_charm=current_charm))
        switch_args += " --switch cs:{}".format(charm)
    elif target_tilde and current_charm_path != target_tilde:
        # Known relocated charm: switch to its blessed charmstore path
        switch_args += " --switch {}".format(target_tilde)
    elif "cs:~" in current_charm_path or "/" in current_charm_path:
        # Namespaced/branched path with no known target: suggest promulgated
        print("""
    WARNING!!! Application {app} from unrecognized path {current_charm}
    Suggesting switch to latest promulgated cs: version.
    Please ensure updates in the above branched charm are in the latest
    promulgated charm before running upgrade-charm on this application.
    """.format(app=app, current_charm=current_charm))
        switch_args += " --switch {}".format("cs:{}".format(charm))

    if charm in DEFERRED_RESTART_CHARMS:
        config_filename = "deferred_restart_config_{}.yaml".format(app)
        # Format of config file per 'juju deploy --help' doc
        # application-name:
        #   option1: value1
        #   option2: value2
        with open(config_filename, "w") as conf:
            print("{}:\n enable-auto-restarts: False".format(app), file=conf)

    if switch_args or common.available_upgrade(app, model_apps):
        # Upgrade needed: attach the deferred-restart config to upgrade-charm
        config_args = ' --config {}'.format(config_filename) if config_filename else ""
        print(" juju upgrade-charm {}{}{}".format(app, switch_args, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
    elif config_filename:
        # No upgrade available, but deferred restarts still need enabling
        config_args = ' --file {}'.format(config_filename) if config_filename else ""
        print(" juju config {}{}".format(app, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
291 | + |
292 | + |
def plan_action_managed_phase_app(app, model_apps, release):
    """Print an action-managed, unit-by-unit upgrade plan for *app*.

    Sets action-managed-upgrade=true plus the target openstack-origin, then
    runs the openstack-upgrade action on the leader first, followed by the
    remaining units.

    Args:
        app: application name in the model.
        model_apps: 'applications' dict from juju status YAML.
        release: target UCA pocket, e.g. 'cloud:bionic-ussuri'.
    """
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    units.remove(leader)
    # Leader must upgrade first; removed the unused unit_list string build.
    upgradeorder = [leader]
    upgradeorder.extend(units)
    print(" juju config {} "
          "action-managed-upgrade=true "
          "openstack-origin={}".format(app, release))
    print(" {}".format(WATCH_COMMAND))
    for unit in upgradeorder:
        print(" juju run-action --wait {} "
              "openstack-upgrade".format(unit))
307 | + |
308 | + |
def parse_args(argv):
    """Process CLI arguments.

    Args:
        argv: argument strings excluding the program name (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace with status_file, charms_only and release.
    """
    parser = argparse.ArgumentParser(
        prog="openstack_managed_upgrade.py",
        # Trailing spaces added: the implicit string concatenation previously
        # produced "yamland creates" / "theversion" in --help output.
        description=(
            "this program uses the output from juju status --format yaml "
            "and creates a playbook of juju upgrade actions to upgrade the "
            "version of Openstack to the target version"
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-f", "--status-file", dest="status_file", type=str,
                        default="jsfy",
                        help="file containing 'juju status --format yaml' output")
    parser.add_argument("-c", "--charms-only", dest="charms_only",
                        action="store_true",
                        help="Only plan charm upgrades")
    parser.add_argument("release", type=str,
                        help="Set release to UCA target for upgrade such as: 'cloud:bionic-ussuri'")
    return parser.parse_args(argv)
329 | + |
330 | + |
def main(argv=None):
    """Emit the full openstack upgrade playbook, phase by phase.

    For each phase: plan a charm upgrade for every matching app, then (unless
    --charms-only) plan the openstack payload upgrade using the style that
    fits the charm (source= config, action-managed per-unit, or plain
    openstack-origin= config).
    """
    args = parse_args(argv)
    model_apps = common.get_model_apps(args.status_file)
    svc2charm = common.render_app_to_charm_dict(model_apps)
    print(PRE_UPGRADE_STEPS)
    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        this_phase_apps = common.find_charms_apps_in_model(PHASE_APPS[phase],
                                                          model_apps, svc2charm)
        for app in this_phase_apps:
            # skip landscape upgrades
            if "landscape" in app:
                continue
            print(" App: {}".format(app))
            print(" Charm: {}".format(svc2charm[app]))
            print(" Commands:")
            plan_charm_upgrade(app, model_apps, svc2charm)
            if args.charms_only:
                continue
            if svc2charm[app] in CHARM_UPGRADES_ONLY:
                # no openstack repo upgrade for these charms
                continue
            if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
                # not possible to be action-managed-upgrade
                print(" juju config {} source={}".format(app, args.release))
            elif phase in ACTION_MANAGED_PHASES:
                plan_action_managed_phase_app(app, model_apps, args.release)
            else:
                print(" juju config {} openstack-origin={}"
                      " action-managed-upgrade=false".format(app, args.release))
            if phase not in DEFERRED_WAIT_PHASES:
                # wait for the model to settle after each app in this phase
                print(" {}\n".format(WATCH_COMMAND))
        if not args.charms_only and phase in DEFERRED_WAIT_PHASES:
            # deferred phases settle once, after all apps are planned
            print("\n {}\n".format(WATCH_COMMAND))

    print(POST_UPGRADE_STEPS)
367 | + |
368 | +if __name__ == "__main__": |
369 | + main(sys.argv[1:]) |
370 | diff --git a/cloud_upgrade_planner/plan.py b/cloud_upgrade_planner/plan.py |
371 | old mode 100755 |
372 | new mode 100644 |
373 | diff --git a/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py |
374 | new file mode 100644 |
375 | index 0000000..8db72c1 |
376 | --- /dev/null |
377 | +++ b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py |
378 | @@ -0,0 +1,328 @@ |
379 | +#!/usr/bin/python3 |
380 | + |
381 | +# requires file in current directory named "jsfy" created with |
382 | +# juju status --format yaml > jsfy |
383 | +# (version of juju must support leadership in yaml output) |
384 | + |
385 | +# Assumptions |
386 | +# we assume you're deploying either cs:charmname or cs:~namespace/charmname |
387 | +# or local:<series>/<charm> versions of charms, and does not support |
388 | +# differently-named charms. |
389 | + |
390 | +# set this to the destination openstack-origin of your choice |
391 | +# TODO: add in parseargs |
392 | +# include stepped version callouts |
393 | +# include evacuating and monitoring each nova compute node |
394 | +# include neutron-gateway HA router migrations if pausing |
395 | + |
396 | +import re |
397 | +import sys |
398 | +import yaml |
399 | + |
400 | +from cloud_upgrade_planner import common |
401 | + |
# Set this to your target platform ### See TODOs
UCA = "distro"
# Target ubuntu series for the juju upgrade-series / set-series commands
series = "bionic"
# Populated by main(): maps application name -> charm name
svc2charm = {}

# The phases of the update. After any of these phases, updates can be paused
# Each phase is recommended to take a day, with phases 3 and 4 being combined
PHASE_APPS = {0: ["percona-cluster",
                  "rabbitmq-server",
                  "ceph-mon",
                  "ovn-central",
                  "etcd",
                  "vault"],
              1: ["keystone",
                  "ceph-fs",
                  "ceph-radosgw",
                  "swift-proxy",
                  "swift-storage",
                  "cinder",
                  "barbican",
                  "glance",
                  "aodh",
                  "gnocchi",
                  "ceilometer",
                  "designate",
                  "designate-bind",
                  "neutron-api",
                  "nova-cloud-controller",
                  "heat",
                  "manila",
                  "manila-generic",
                  "openstack-dashboard",
                  "placement",
                  "octavia",
                  "memcached",
                  "easyrsa"],
              2: ["nova-compute",
                  "neutron-gateway",
                  "ceph-osd"],
}

# These charms use source: instead of openstack-origin: config flags
ORIGIN_VAR_IS_SOURCE = ["ceph-osd",
                        "ceph-mon",
                        "ceph-fs",
                        "ceph-radosgw",
                        "percona-cluster",
                        "rabbitmq-server"]

# These charms only need the OS upgraded, no UCA tracking
NO_ORIGIN = ["designate-bind", "vault"]

# These charms do not have openstack-origin/source config flags
CHARM_UPGRADES_ONLY = ["neutron-openvswitch",
                       "memcached",
                       "mongodb",
                       "graylog",
                       "prometheus",
                       "openstack-service-checks",
                       "cinder-ceph",
                       "cinder-backup"]

# Units of these charms are never paused before the series upgrade
NO_PAUSE_APPS = ["ceph-mon",
                 "ceph-osd",
                 "neutron-gateway",
                 "nova-compute"]

# Actions to run on the leader after all of an app's units are upgraded
POST_APP_UPGRADE_ACTIONS_FOR_LEADERS = {
    "percona-cluster": "complete-cluster-series-upgrade",
    "rabbitmq-server": "complete-cluster-series-upgrade",
    "ceilometer": "ceilometer-upgrade",
}

# Human steps that cannot be automated, surfaced in the playbook text
POST_APP_UPGRADE_MANUAL_STEPS = {
    "vault": "You will need to unseal the vault after each unit reboots",
}
# This can be used both for required --switch changes for 19.04+ or can be
# populated with specific version URLs
# e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be
# switched to cs:~openstack-charmers/cinder-backup
CHARMSTORE_PATHS = {"cinder-backup": "cs:~openstack-charmers/cinder-backup"}
# juju states that indicate the model has not settled yet; WATCH_COMMAND
# greps for them so the operator can wait between steps
JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing"
WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES)

PRE_UPGRADE_STEPS = """
This playbook assumes your current openstack revision is at the highest level of the current ubuntu series:
 trusty-mitaka
 xenial-queens
 bionic-ussuri
You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify.

### IMPORTANT ###
Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders

Perform these steps to prepare your environment/directory for the remaining playbook.

# Login to MAAS WebUI and add the new series to MAAS under the Images tab.

# Create "local" file for setting your apt preferences - Required for DRU to be non-interactive
cat > local << EOF
DPkg::options { "--force-confdef"; "--force-confnew"; }
EOF

# update the default-series of the model to the new series
juju model-config default-series=bionic # or focal

"""

POST_UPGRADE_STEPS = """

# POST UPGRADE ACTIONS
Perform these steps after the cloud has completed upgrades

# Remove the "local" file added to apt configs for DRU, as it's not correct for typical upgrades
juju run --all 'sudo rm /etc/apt/apt.conf.d/local'

N.B. Do not upgrade Nagios from xenial to bionic until all cloud nodes have completed upgrade to bionic.

Upgrade all remaining machines (typically LMA stack) not previously captured:
# find the units with:
juju machines|grep <previous release|xenial|bionic|etc>

# Check each of those machines that they're not already upgraded by listing /etc/os-release file
juju run --machine <comma,separated,list,of,machines,left,at,old,version> 'hostname; grep VERSION_CODENAME /etc/os-release'

# perform steps similiar to above for the remaining machines/applications.
"""
529 | + |
530 | + |
def plan_parallel(model_apps, parallel_apps, machines_planned):
    """Print a parallelized series-upgrade plan for all *parallel_apps*.

    Commands are collected into ordered task groups (pause, prepare,
    dist-upgrade/do-release-upgrade, reboot, origin bump, complete, cleanup)
    so each group can be executed in parallel across applications.
    Machines already handled by an earlier phase are skipped; newly planned
    machines are appended to *machines_planned* (mutated in place).
    """
    cluster_pause_tasks = []       # hacluster subordinate pauses
    pause_tasks = []               # principal (non-leader) unit pauses
    leader_prepare_tasks = []
    non_leader_prepare_tasks = []
    upgrade_machines = []          # machines receiving dist-upgrade + d-r-u
    post_upgrade_tasks = []        # openstack-origin/source config bumps
    leader_complete_tasks = []
    non_leader_complete_tasks = []
    cleanup_tasks = []             # post-app leader actions and manual steps
    for app in parallel_apps:
        print(" juju set-series {} {}".format(app, series))
        leader = common.find_leader_for_app(app, model_apps)
        units = common.find_units_for_app(app, model_apps)
        units.remove(leader)

        post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
        manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
        if post_leader_action:
            cleanup_tasks.append(" juju run-action --wait {} {}".format(leader, post_leader_action))
        if manual_steps:
            cleanup_tasks.append(" ## Manual step: {}".format(manual_steps))

        for unit in units:
            unit_machine = common.find_machine_from_unit(app, unit, model_apps)
            if unit_machine in machines_planned:
                # CONSIDER: Note that we decided to skip dist-upgrade since it was already done?
                continue
            hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
            if hacluster_unit is not None:
                cluster_pause_tasks.append(" juju run-action --wait {} pause".format(hacluster_unit))
            if svc2charm[app] not in NO_PAUSE_APPS:
                pause_tasks.append(" juju run-action --wait {} pause".format(unit))
            non_leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(unit_machine, series, unit))
            non_leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(unit_machine, unit))
            upgrade_machines.append(unit_machine)
            machines_planned.append(unit_machine)

        leader_machine = common.find_machine_from_unit(app, leader, model_apps)
        if leader_machine in machines_planned:
            continue
        # upgrade leader unit's machine
        leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(leader_machine, series, leader))
        upgrade_machines.append(leader_machine)
        leader_complete_tasks.append(" echo 'Check status of workload for {}'".format(leader))
        if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
            post_upgrade_tasks.append(" juju config {} source={}".format(app, UCA))
        elif svc2charm[app] not in NO_ORIGIN:
            post_upgrade_tasks.append(" juju config {} openstack-origin={}".format(app, UCA))
        leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(leader_machine, leader))
        machines_planned.append(leader_machine)
    # Emit the grouped playbook sections in execution order.
    print(" # Pause non-leaders")
    print("\n".join(cluster_pause_tasks))
    print("\n".join(pause_tasks))
    print(" # Prepare leaders - These can be done in parallel before non-leaders")
    print("\n".join(leader_prepare_tasks))
    print(" # Prepare non-leaders - These can be done in parallel after leaders")
    print("\n".join(non_leader_prepare_tasks))
    # copy the apt "local" preferences file (created in PRE_UPGRADE_STEPS)
    # to every machine being upgraded
    print(" for i in {}; do juju scp local $i: ; done".format(" ".join(upgrade_machines)))
    machine_spec = ",".join(upgrade_machines)
    print(" # Update all machines to latest patches")
    print(" ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step")
    # NOTE(review): assumes the juju user homedir is /home/ubuntu — flagged in
    # review as needing a followup fix.
    print(" juju run --machine {} --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(machine_spec))
    print(" # Perform do-release-upgrade")
    print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(machine_spec))
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" # Reboot all API machines into {}".format(series))
    print(" juju run --machine {} --timeout=10m 'sudo init 6'".format(machine_spec))
    print(" # Post Upgrade tasks")
    print("\n".join(post_upgrade_tasks))
    print(" # Complete leaders - These can all be done in parallel before non-leaders")
    print("\n".join(leader_complete_tasks))
    print(" # Complete non-leaders - These can all be done in parallel after leaders complete")
    print("\n".join(non_leader_complete_tasks))
    print(" # Post App Upgrade cleanup tasks")
    print("\n".join(cleanup_tasks))
    print("\n")
610 | + |
611 | + |
def main():
    """Emit the ubuntu series-upgrade playbook, phase by phase.

    Phase 1 (the API services) is planned in parallel; all other phases are
    planned one application at a time.  Populates the module-level svc2charm
    map as applications are discovered.
    """
    model_apps = common.get_model_apps("jsfy")
    machines_planned = []

    print(PRE_UPGRADE_STEPS)

    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        phase_services = []
        for charm in PHASE_APPS[phase]:
            # landscape is handled outside this playbook
            if 'landscape' in charm:
                continue
            for service in common.find_apps_from_charm(charm, model_apps):
                phase_services.append(service)
                svc2charm[service] = charm
        if phase == 1:
            plan_parallel(model_apps, phase_services, machines_planned)
            continue
        for service in phase_services:
            plan_sequential(model_apps, service, machines_planned)

    print(POST_UPGRADE_STEPS)
635 | + |
636 | + |
def plan_sequential(model_apps, app, machines_planned):
    """Print a one-unit-at-a-time series-upgrade plan for *app*.

    The leader's machine is upgraded first (prepare, dist-upgrade,
    do-release-upgrade, reboot, complete), the origin/source config is bumped,
    then each non-leader machine follows.  Machines already handled are
    skipped; newly planned machines are appended to *machines_planned*
    (mutated in place).
    """
    # NOTE(review): post_leader_action and manual_steps are looked up here but
    # never used below — the cleanup actions (e.g. percona/rabbitmq
    # complete-cluster-series-upgrade) appear to be missing from the
    # sequential plan.  Confirm against plan_parallel, which does emit them.
    post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
    manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
    findlandscape = re.compile("landscape.*")
    if re.match(findlandscape, app) is not None:
        return
    print(" App: {}".format(app))
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    units.remove(leader)
    unit_list = " ".join(units)
    print(" Units {} {}".format(leader, unit_list))
    print(" {}".format(WATCH_COMMAND))
    print(" juju set-series {} {}".format(app, series))
    # Pause hacluster subordinates first so cluster resources fail over cleanly
    for unit in units:
        hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
        if hacluster_unit is not None:
            print(" juju run-action --wait {} pause".format(hacluster_unit))
    for unit in units:
        if svc2charm[app] not in NO_PAUSE_APPS:
            print(" juju run-action --wait {} pause".format(unit))
    # gate for machine already done
    leader_machine = common.find_machine_from_unit(app, leader, model_apps)
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        # upgrade leader unit's machine
        print(" juju upgrade-series {} prepare {} -y".format(leader_machine, series))
        print(" juju scp local {}:".format(leader_machine))
        # NOTE(review): assumes the juju user homedir is /home/ubuntu —
        # flagged in review as needing a followup fix.
        print(
            " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                leader_machine))
        print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(leader_machine))
        print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(leader_machine))
        print(" sleep 120; jw; echo 'Check status of workload for {}'".format(leader))
    # Bump the openstack payload origin now that the leader OS is upgraded
    if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
        print(" juju config {} source={}".format(app, UCA))
    elif svc2charm[app] not in NO_ORIGIN:
        print(" juju config {} openstack-origin={}".format(app, UCA))
    print(" sleep 120; jw")
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        print(" juju upgrade-series {} complete".format(leader_machine))
        machines_planned.append(leader_machine)
    # Now repeat the same machine steps for each non-leader unit
    for unit in units:
        non_leader_machine = common.find_machine_from_unit(app, unit, model_apps)
        if non_leader_machine in machines_planned:
            print(" # Skipping dist-upgrade as already done prior for {}".format(non_leader_machine))
        else:
            print(" juju upgrade-series {} prepare {} -y".format(non_leader_machine, series))
            print(" juju scp local {}:".format(non_leader_machine))
            print(
                " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                    non_leader_machine))
            print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(
                non_leader_machine))
            print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(non_leader_machine))
            print(" sleep 120; jw; echo 'Check status of workload for {}'".format(unit))
            print(" juju upgrade-series {} complete".format(non_leader_machine))
            machines_planned.append(non_leader_machine)
698 | + if post_leader_action: |
699 | + print(" juju run-action --wait {} {}".format(leader, post_leader_action)) |
700 | + if manual_steps: |
701 | + print(" ## Manual step: {}".format(manual_steps)) |
702 | + print('\n') |
703 | + |
704 | + |
# Script entry point: run the planner only when executed directly,
# not when imported (e.g. by the console_scripts wrapper in setup.py).
if __name__ == "__main__":
    main()
707 | diff --git a/setup.py b/setup.py |
708 | index 8c310d4..a9ecfa7 100644 |
709 | --- a/setup.py |
710 | +++ b/setup.py |
711 | @@ -48,6 +48,8 @@ setuptools.setup( |
712 | "console_scripts": [ |
713 | "cloud-upgrade-planner=cloud_upgrade_planner.cli:main", |
714 | "update-charm-revisions=cloud_upgrade_planner.cli:refresh", |
715 | + "ubuntu-series-upgrade-for-openstack=cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack:main", |
716 | + "openstack-managed-upgrade=cloud_upgrade_planner.openstack_managed_upgrade:main", |
717 | ] |
718 | }, |
719 | setup_requires=["setuptools_scm"], |
720 | diff --git a/tests/test_cli.py b/tests/test_cli.py |
721 | old mode 100755 |
722 | new mode 100644 |
723 | diff --git a/tests/test_ubuntu_series_upgrade_for_openstack.py b/tests/test_ubuntu_series_upgrade_for_openstack.py |
724 | new file mode 100644 |
725 | index 0000000..8d80437 |
726 | --- /dev/null |
727 | +++ b/tests/test_ubuntu_series_upgrade_for_openstack.py |
728 | @@ -0,0 +1,728 @@ |
729 | +import pytest |
730 | +from unittest.mock import patch, mock_open, call |
731 | +import yaml |
732 | + |
733 | +from cloud_upgrade_planner import common as c |
734 | +from cloud_upgrade_planner import openstack_managed_upgrade as o |
735 | +from cloud_upgrade_planner import ubuntu_series_upgrade_for_openstack as u |
736 | + |
737 | + |
# Canned juju "status"-style model (applications -> charms/units/machines)
# used as the shared fixture for every test class in this module.  It
# deliberately mixes leaders/non-leaders, LXD and bare-metal machine ids,
# hacluster/landscape-client subordinates, local and ~branch charm paths.
JUJU_FILE_MOCK = """
applications:
  aodh-mysql-router:
    can-upgrade-to: cs:mysql-router-11
    charm: cs:mysql-router-6
  easyrsa:
    can-upgrade-to: cs:~containers/easyrsa-408
    charm: cs:~containers/easyrsa-345
    units:
      easyrsa/0:
        leader: true
        machine: 7/lxd/3
  hacluster-neutron:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  hacluster-nova:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  hacluster-vault:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  landscape-client:
    charm: cs:landscape-client-35
  juju-lint:
    charm: local:focal/juju-lint-1
    units:
      juju-lint/0:
        leader: true
        machine: 8/lxd/15
  memcached:
    charm: cs:memcached-34
    units:
      memcached/1:
        leader: true
        machine: 5/lxd/4
  mysql-innodb-cluster:
    can-upgrade-to: cs:mysql-innodb-cluster-11
    charm: cs:mysql-innodb-cluster-5
    units:
      mysql-innodb-cluster/0:
        machine: 3/lxd/8
        subordinates:
          landscape-client/65: true
      mysql-innodb-cluster/1:
        leader: true
        machine: 4/lxd/8
        subordinates:
          landscape-client/66: true
      mysql-innodb-cluster/2:
        machine: 5/lxd/8
        subordinates:
          landscape-client/67: true
  neutron-api:
    can-upgrade-to: cs:neutron-api-299
    charm: cs:neutron-api-292
    units:
      neutron-api/0:
        machine: 6/lxd/5
        subordinates:
          hacluster-neutron/0: true
          landscape-client/69: true
          neutron-api-mysql-router/0: true
      neutron-api/1:
        machine: 7/lxd/6
        subordinates:
          hacluster-neutron/1: true
          landscape-client/68: true
          neutron-api-mysql-router/1: true
      neutron-api/2:
        leader: true
        machine: 8/lxd/5
        subordinates:
          hacluster-neutron/2: true
          landscape-client/70: true
          neutron-api-mysql-router/2: true
  nova-cloud-controller:
    can-upgrade-to: cs:nova-cloud-controller-358
    charm: cs:nova-cloud-controller-353
    units:
      nova-cloud-controller/0:
        machine: 6/lxd/6
        subordinates:
          hacluster-nova/0: true
          landscape-client/72: true
          nova-cloud-controller-mysql-router/0: true
      nova-cloud-controller/1:
        machine: 7/lxd/7
        subordinates:
          hacluster-nova/2: true
          landscape-client/73: true
          nova-cloud-controller-mysql-router/2: true
      nova-cloud-controller/2:
        leader: true
        machine: 8/lxd/6
        subordinates:
          hacluster-nova/1: true
          landscape-client/71: true
          nova-cloud-controller-mysql-router/1: true
  nova-cloud-controller-mysql-router:
    can-upgrade-to: cs:mysql-router-11
    charm: cs:mysql-router-6
  nova-compute-kvm:
    can-upgrade-to: cs:nova-compute-334
    charm: cs:nova-compute-325
    units:
      nova-compute-kvm/1:
        machine: '10'
        subordinates:
          landscape-client/19: true
      nova-compute-kvm/10:
        machine: '19'
        subordinates:
          landscape-client/10: true
      nova-compute-kvm/11:
        machine: '20'
        subordinates:
          landscape-client/20: true
      nova-compute-kvm/12:
        machine: '21'
        subordinates:
          landscape-client/11: true
      nova-compute-kvm/13:
        machine: '22'
        subordinates:
          landscape-client/4: true
      nova-compute-kvm/7:
        leader: true
        machine: '16'
        subordinates:
          landscape-client/7: true
      nova-compute-kvm/9:
        machine: '18'
        subordinates:
          landscape-client/16: true
  rabbitmq-server:
    can-upgrade-to: cs:~llama-charmers-next/rabbitmq-server-6
    charm: cs:~llama-charmers-next/rabbitmq-server-5
    units:
      rabbitmq-server/4:
        machine: 6/lxd/12
      rabbitmq-server/5:
        leader: true
        machine: 7/lxd/12
      rabbitmq-server/6:
        machine: 8/lxd/11
  vault:
    can-upgrade-to: cs:vault-50
    charm: cs:vault-44
    units:
      vault/0:
        machine: '0'
        subordinates:
          hacluster-vault/1: true
          landscape-client/24: true
      vault/1:
        leader: true
        machine: '1'
        subordinates:
          hacluster-vault/2: true
          landscape-client/23: true
      vault/2:
        machine: '2'
        subordinates:
          hacluster-vault/0: true
          landscape-client/22: true
"""
# Parsed "applications" mapping — the same shape common.get_model_apps()
# returns, so it can be passed to the helpers directly.
JUJU_FILE_DICT = yaml.safe_load(JUJU_FILE_MOCK)["applications"]

# Expected application-name -> charm-name mapping for the mock model;
# also serves as the reference value for test_render_app_to_charm_dict.
SVC2CHARM = {
    "aodh-mysql-router": "mysql-router",
    "easyrsa": "easyrsa",
    "hacluster-neutron": "hacluster",
    "hacluster-nova": "hacluster",
    "hacluster-vault": "hacluster",
    "juju-lint": "juju-lint",
    "landscape-client": "landscape-client",
    "memcached": "memcached",
    "mysql-innodb-cluster": "mysql-innodb-cluster",
    "neutron-api": "neutron-api",
    "nova-cloud-controller": "nova-cloud-controller",
    "nova-cloud-controller-mysql-router": "mysql-router",
    "nova-compute-kvm": "nova-compute",
    "rabbitmq-server": "rabbitmq-server",
    "vault": "vault",
}
923 | + |
924 | + |
class TestCommon:
    """Tests for cloud_upgrade_planner.common helpers against JUJU_FILE_DICT."""

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("memcached", "memcached/1"),
            ("mysql-innodb-cluster", "mysql-innodb-cluster/1"),
            ("neutron-api", "neutron-api/2"),
        ],
    )
    def test_find_leader_for_app(self, app, expected):
        """The unit flagged ``leader: true`` is returned for each app."""
        output = c.find_leader_for_app(app, JUJU_FILE_DICT)
        assert output == expected

    def test_get_model_apps_missing_file(self):
        """A non-existent status file path causes a SystemExit."""
        with pytest.raises(SystemExit):
            c.get_model_apps("")

    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_get_model_apps_provided_file(self, juju_file):
        """A readable status file yields the parsed applications mapping."""
        output = c.get_model_apps("test")
        assert output == JUJU_FILE_DICT

    @pytest.mark.parametrize(
        "charms,expected",
        [
            (
                ["mysql-router", "nova-compute", "hacluster", "vault"],
                [
                    "aodh-mysql-router",
                    "nova-cloud-controller-mysql-router",
                    "nova-compute-kvm",
                    "hacluster-neutron",
                    "hacluster-nova",
                    "hacluster-vault",
                    "vault",
                ],
            ),
            # Unknown charm names simply produce no applications.
            (["non-existing-charm"], []),
        ],
    )
    def test_find_charms_apps_in_model(self, charms, expected):
        """All applications deployed from the given charms are collected."""
        output = c.find_charms_apps_in_model(charms, JUJU_FILE_DICT, SVC2CHARM)
        assert output == expected

    @pytest.mark.parametrize(
        "app,expected",
        [
            (
                "nova-compute-kvm",
                [
                    "nova-compute-kvm/1",
                    "nova-compute-kvm/10",
                    "nova-compute-kvm/11",
                    "nova-compute-kvm/12",
                    "nova-compute-kvm/13",
                    "nova-compute-kvm/7",
                    "nova-compute-kvm/9",
                ],
            ),
            (
                "mysql-innodb-cluster",
                [
                    "mysql-innodb-cluster/0",
                    "mysql-innodb-cluster/1",
                    "mysql-innodb-cluster/2",
                ],
            ),
        ],
    )
    def test_find_units_for_app(self, app, expected):
        """Every unit name of an application is returned (model order)."""
        output = c.find_units_for_app(app, JUJU_FILE_DICT)
        assert output == expected

    @pytest.mark.parametrize(
        "charm,expected",
        [
            ("neutron-api", ["neutron-api"]),
            ("nova-compute", ["nova-compute-kvm"]),
            (
                "mysql-router",
                ["aodh-mysql-router", "nova-cloud-controller-mysql-router"],
            ),
        ],
    )
    def test_find_apps_from_charm(self, charm, expected):
        """One charm may back several applications; all are returned."""
        output = c.find_apps_from_charm(charm, JUJU_FILE_DICT)
        assert output == expected

    def test_find_units_for_app_not_in_phase(self):
        """Subordinate-only apps have no 'units' key -> KeyError."""
        with pytest.raises(KeyError):
            c.find_units_for_app("nova-cloud-controller-mysql-router", JUJU_FILE_DICT)

    def test_find_units_for_app_missing(self):
        """An application absent from the model raises KeyError."""
        with pytest.raises(KeyError):
            c.find_units_for_app("appx", JUJU_FILE_DICT)

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-compute-kvm", "nova-compute-kvm/9", "18"),
            ("memcached", "memcached/1", "5/lxd/4"),
        ],
    )
    def test_find_machine_from_unit(self, app, unit, expected):
        """Machine ids are resolved for both bare-metal and LXD units."""
        output = c.find_machine_from_unit(app, unit, JUJU_FILE_DICT)
        assert output == expected

    def test_render_app_to_charm_dict(self):
        """The app->charm map derived from the model matches SVC2CHARM."""
        output = c.render_app_to_charm_dict(JUJU_FILE_DICT)
        assert output == SVC2CHARM

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("aodh-mysql-router", True),
            ("memcached", False),  # no 'can-upgrade-to' key in the mock
        ],
    )
    def test_available_upgrade(self, app, expected):
        """'can-upgrade-to' presence determines upgrade availability."""
        output = c.available_upgrade(app, JUJU_FILE_DICT)
        assert output == expected

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-cloud-controller", "nova-cloud-controller/1", "hacluster-nova/2"),
            ("neutron-api", "neutron-api/2", "hacluster-neutron/2"),
            ("nova-compute-kvm", "nova-compute-kvm/1", None),  # missing HA
            ("memcached", "memcached/1", None),  # missing subordinates
        ],
    )
    def test_find_hacluster_for_unit(self, app, unit, expected):
        """The hacluster subordinate of a unit is found, or None if absent."""
        output = c.find_hacluster_for_unit(app, unit, JUJU_FILE_DICT)
        assert output == expected
1059 | + |
1060 | + |
class TestOpenstackManagedUpgrade:
    """Tests for openstack_managed_upgrade plan generation.

    These tests patch builtins.print and compare the exact, ordered
    sequence of mock_calls, so they pin both the wording and the order
    of the emitted upgrade-plan commands.
    """

    @patch("builtins.print")
    @patch(
        "cloud_upgrade_planner.common.find_units_for_app",
        return_value=["neutron-api/0", "neutron-api/1", "neutron-api/2"],
    )
    @patch(
        "cloud_upgrade_planner.common.find_leader_for_app", return_value="neutron-api/2"
    )
    def test_plan_action_managed_phase_app(self, mock_leader, mock_units, mock_stdout):
        """Leader is upgraded first, then remaining units in order."""
        o.plan_action_managed_phase_app("test-app", JUJU_FILE_DICT, "test-release")
        calls = [
            call(
                " juju config test-app action-managed-upgrade=true openstack-origin=test-release"
            ),
            call(" {}".format(o.WATCH_COMMAND)),
            call(" juju run-action --wait neutron-api/2 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/0 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/1 openstack-upgrade"),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_local_path(self, mock_stdout):
        """A local: charm path triggers a warning and a --switch to cs:."""
        output = o.plan_charm_upgrade("juju-lint", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application juju-lint has local charm path local:focal/juju-lint-1\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in local charm are in upstream charm before\n running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm juju-lint --switch cs:juju-lint"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_cs_path(self, mock_stdout):
        """A promulgated cs: charm upgrades in place, no warning."""
        output = o.plan_charm_upgrade("neutron-api", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(" juju upgrade-charm neutron-api"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_tilde(self, mock_stdout):
        """A cs:~branch charm path warns and switches to the promulgated charm."""
        output = o.plan_charm_upgrade("easyrsa", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application easyrsa from unrecognized path cs:~containers/easyrsa-345\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm easyrsa --switch cs:easyrsa"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_plan_charm_upgrade_deferred(self, defer_file, mock_stdout):
        """Charms with deferred restarts also get a --config override file."""
        output = o.plan_charm_upgrade("rabbitmq-server", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application rabbitmq-server from unrecognized path cs:~llama-charmers-next/rabbitmq-server-5\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call("rabbitmq-server:\n enable-auto-restarts: False", file=defer_file()),
            call(
                " juju upgrade-charm rabbitmq-server --switch cs:rabbitmq-server --config deferred_restart_config_rabbitmq-server.yaml"
            ),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls
1139 | + |
1140 | + |
1141 | +class TestUbuntuSeriesUpgrade: |
1142 | + def test_main_missing_juju_file(self): |
1143 | + with pytest.raises(SystemExit): |
1144 | + u.main() |
1145 | + |
1146 | + @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_sequential") |
1147 | + @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_parallel") |
1148 | + @patch( |
1149 | + "cloud_upgrade_planner.common.find_apps_from_charm", return_value=["app"] |
1150 | + ) |
1151 | + @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK) |
1152 | + def test_main_with_juju_file( |
1153 | + self, juju_file, mock_apps_from_charm, mock_parallel, mock_sequential |
1154 | + ): |
1155 | + u.main() |
1156 | + |
1157 | + charm_calls = [] |
1158 | + for phase in range(2): |
1159 | + for app in u.PHASE_APPS[phase]: |
1160 | + charm_calls.append(call(app, JUJU_FILE_DICT)) |
1161 | + mock_apps_from_charm.assert_has_calls(charm_calls) |
1162 | + |
1163 | + parallel_apps_no = len(u.PHASE_APPS[1]) |
1164 | + mock_parallel.assert_called_once_with( |
1165 | + JUJU_FILE_DICT, ["app"] * parallel_apps_no, [] |
1166 | + ) |
1167 | + |
1168 | + sequential_apps_no = len(u.PHASE_APPS[0]) + len(u.PHASE_APPS[2]) |
1169 | + seq_calls = [call(JUJU_FILE_DICT, "app", []) for _ in range(sequential_apps_no)] |
1170 | + mock_sequential.assert_has_calls(seq_calls) |
1171 | + |
1172 | + @patch("builtins.print") |
1173 | + @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK) |
1174 | + def test_main_output(self, juju_file, mock_stdout): |
1175 | + u.main() |
1176 | + calls = [ |
1177 | + call(u.PRE_UPGRADE_STEPS), |
1178 | + call("Phase 0\n========"), |
1179 | + call(" App: rabbitmq-server"), |
1180 | + call(" Units rabbitmq-server/5 rabbitmq-server/4 rabbitmq-server/6"), |
1181 | + call( |
1182 | + " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" |
1183 | + ), |
1184 | + call(" juju set-series rabbitmq-server bionic"), |
1185 | + call(" juju run-action --wait rabbitmq-server/4 pause"), |
1186 | + call(" juju run-action --wait rabbitmq-server/6 pause"), |
1187 | + call(" juju upgrade-series 7/lxd/12 prepare bionic -y"), |
1188 | + call(" juju scp local 7/lxd/12:"), |
1189 | + call( |
1190 | + " juju run --machine 7/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1191 | + ), |
1192 | + call( |
1193 | + " juju run --machine 7/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1194 | + ), |
1195 | + call(" juju run --machine 7/lxd/12 --timeout=5m 'sudo init 6'"), |
1196 | + call( |
1197 | + " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/5'" |
1198 | + ), |
1199 | + call(" juju config rabbitmq-server source=distro"), |
1200 | + call(" sleep 120; jw"), |
1201 | + call(" juju upgrade-series 7/lxd/12 complete"), |
1202 | + call(" juju upgrade-series 6/lxd/12 prepare bionic -y"), |
1203 | + call(" juju scp local 6/lxd/12:"), |
1204 | + call( |
1205 | + " juju run --machine 6/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1206 | + ), |
1207 | + call( |
1208 | + " juju run --machine 6/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1209 | + ), |
1210 | + call(" juju run --machine 6/lxd/12 --timeout=5m 'sudo init 6'"), |
1211 | + call( |
1212 | + " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/4'" |
1213 | + ), |
1214 | + call(" juju upgrade-series 6/lxd/12 complete"), |
1215 | + call(" juju upgrade-series 8/lxd/11 prepare bionic -y"), |
1216 | + call(" juju scp local 8/lxd/11:"), |
1217 | + call( |
1218 | + " juju run --machine 8/lxd/11 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1219 | + ), |
1220 | + call( |
1221 | + " juju run --machine 8/lxd/11 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1222 | + ), |
1223 | + call(" juju run --machine 8/lxd/11 --timeout=5m 'sudo init 6'"), |
1224 | + call( |
1225 | + " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/6'" |
1226 | + ), |
1227 | + call(" juju upgrade-series 8/lxd/11 complete"), |
1228 | + call( |
1229 | + " juju run-action --wait rabbitmq-server/5 complete-cluster-series-upgrade" |
1230 | + ), |
1231 | + call("\n"), |
1232 | + call(" App: vault"), |
1233 | + call(" Units vault/1 vault/0 vault/2"), |
1234 | + call( |
1235 | + " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" |
1236 | + ), |
1237 | + call(" juju set-series vault bionic"), |
1238 | + call(" juju run-action --wait hacluster-vault/1 pause"), |
1239 | + call(" juju run-action --wait hacluster-vault/0 pause"), |
1240 | + call(" juju run-action --wait vault/0 pause"), |
1241 | + call(" juju run-action --wait vault/2 pause"), |
1242 | + call(" juju upgrade-series 1 prepare bionic -y"), |
1243 | + call(" juju scp local 1:"), |
1244 | + call( |
1245 | + " juju run --machine 1 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1246 | + ), |
1247 | + call( |
1248 | + " juju run --machine 1 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1249 | + ), |
1250 | + call(" juju run --machine 1 --timeout=5m 'sudo init 6'"), |
1251 | + call(" sleep 120; jw; echo 'Check status of workload for vault/1'"), |
1252 | + call(" sleep 120; jw"), |
1253 | + call(" juju upgrade-series 1 complete"), |
1254 | + call(" juju upgrade-series 0 prepare bionic -y"), |
1255 | + call(" juju scp local 0:"), |
1256 | + call( |
1257 | + " juju run --machine 0 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1258 | + ), |
1259 | + call( |
1260 | + " juju run --machine 0 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1261 | + ), |
1262 | + call(" juju run --machine 0 --timeout=5m 'sudo init 6'"), |
1263 | + call(" sleep 120; jw; echo 'Check status of workload for vault/0'"), |
1264 | + call(" juju upgrade-series 0 complete"), |
1265 | + call(" juju upgrade-series 2 prepare bionic -y"), |
1266 | + call(" juju scp local 2:"), |
1267 | + call( |
1268 | + " juju run --machine 2 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1269 | + ), |
1270 | + call( |
1271 | + " juju run --machine 2 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1272 | + ), |
1273 | + call(" juju run --machine 2 --timeout=5m 'sudo init 6'"), |
1274 | + call(" sleep 120; jw; echo 'Check status of workload for vault/2'"), |
1275 | + call(" juju upgrade-series 2 complete"), |
1276 | + call( |
1277 | + " ## Manual step: You will need to unseal the vault after each unit reboots" |
1278 | + ), |
1279 | + call("\n"), |
1280 | + call("Phase 1\n========"), |
1281 | + call(" juju set-series neutron-api bionic"), |
1282 | + call(" juju set-series nova-cloud-controller bionic"), |
1283 | + call(" juju set-series memcached bionic"), |
1284 | + call(" juju set-series easyrsa bionic"), |
1285 | + call(" # Pause non-leaders"), |
1286 | + call( |
1287 | + " juju run-action --wait hacluster-neutron/0 pause\n juju run-action --wait hacluster-neutron/1 pause\n juju run-action --wait hacluster-nova/0 pause\n juju run-action --wait hacluster-nova/2 pause" |
1288 | + ), |
1289 | + call( |
1290 | + " juju run-action --wait neutron-api/0 pause\n juju run-action --wait neutron-api/1 pause\n juju run-action --wait nova-cloud-controller/0 pause\n juju run-action --wait nova-cloud-controller/1 pause" |
1291 | + ), |
1292 | + call( |
1293 | + " # Prepare leaders - These can be done in parallel before non-leaders" |
1294 | + ), |
1295 | + call( |
1296 | + " juju upgrade-series 8/lxd/5 prepare bionic -y # neutron-api/2\n juju upgrade-series 8/lxd/6 prepare bionic -y # nova-cloud-controller/2\n juju upgrade-series 5/lxd/4 prepare bionic -y # memcached/1\n juju upgrade-series 7/lxd/3 prepare bionic -y # easyrsa/0" |
1297 | + ), |
1298 | + call( |
1299 | + " # Prepare non-leaders - These can be done in parallel after leaders" |
1300 | + ), |
1301 | + call( |
1302 | + " juju upgrade-series 6/lxd/5 prepare bionic -y # neutron-api/0\n juju upgrade-series 7/lxd/6 prepare bionic -y # neutron-api/1\n juju upgrade-series 6/lxd/6 prepare bionic -y # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 prepare bionic -y # nova-cloud-controller/1" |
1303 | + ), |
1304 | + call( |
1305 | + " for i in 6/lxd/5 7/lxd/6 8/lxd/5 6/lxd/6 7/lxd/7 8/lxd/6 5/lxd/4 7/lxd/3; do juju scp local $i: ; done" |
1306 | + ), |
1307 | + call(" # Update all machines to latest patches"), |
1308 | + call( |
1309 | + " ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step" |
1310 | + ), |
1311 | + call( |
1312 | + " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1313 | + ), |
1314 | + call(" # Perform do-release-upgrade"), |
1315 | + call( |
1316 | + " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1317 | + ), |
1318 | + call( |
1319 | + " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" |
1320 | + ), |
1321 | + call( |
1322 | + " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" |
1323 | + ), |
1324 | + call( |
1325 | + " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" |
1326 | + ), |
1327 | + call(" # Reboot all API machines into bionic"), |
1328 | + call( |
1329 | + " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=10m 'sudo init 6'" |
1330 | + ), |
1331 | + call(" # Post Upgrade tasks"), |
1332 | + call( |
1333 | + " juju config neutron-api openstack-origin=distro\n juju config nova-cloud-controller openstack-origin=distro\n juju config memcached openstack-origin=distro\n juju config easyrsa openstack-origin=distro" |
1334 | + ), |
1335 | + call( |
1336 | + " # Complete leaders - These can all be done in parallel before non-leaders" |
1337 | + ), |
1338 | + call( |
1339 | + " echo 'Check status of workload for neutron-api/2'\n juju upgrade-series 8/lxd/5 complete # neutron-api/2\n echo 'Check status of workload for nova-cloud-controller/2'\n juju upgrade-series 8/lxd/6 complete # nova-cloud-controller/2\n echo 'Check status of workload for memcached/1'\n juju upgrade-series 5/lxd/4 complete # memcached/1\n echo 'Check status of workload for easyrsa/0'\n juju upgrade-series 7/lxd/3 complete # easyrsa/0" |
1340 | + ), |
1341 | + call( |
1342 | + " # Complete non-leaders - These can all be done in parallel after leaders complete" |
1343 | + ), |
1344 | + call( |
1345 | + " juju upgrade-series 6/lxd/5 complete # neutron-api/0\n juju upgrade-series 7/lxd/6 complete # neutron-api/1\n juju upgrade-series 6/lxd/6 complete # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 complete # nova-cloud-controller/1" |
1346 | + ), |
1347 | + call(" # Post App Upgrade cleanup tasks"), |
1348 | + call(""), |
1349 | + call("\n"), |
1350 | + call("Phase 2\n========"), |
1351 | + call(" App: nova-compute-kvm"), |
1352 | + call( |
1353 | + " Units nova-compute-kvm/7 nova-compute-kvm/1 nova-compute-kvm/10 nova-compute-kvm/11 nova-compute-kvm/12 nova-compute-kvm/13 nova-compute-kvm/9" |
1354 | + ), |
1355 | + call( |
1356 | + " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" |
1357 | + ), |
1358 | + call(" juju set-series nova-compute-kvm bionic"), |
1359 | + call(" juju upgrade-series 16 prepare bionic -y"), |
1360 | + call(" juju scp local 16:"), |
1361 | + call( |
1362 | + " juju run --machine 16 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1363 | + ), |
1364 | + call( |
1365 | + " juju run --machine 16 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1366 | + ), |
1367 | + call(" juju run --machine 16 --timeout=5m 'sudo init 6'"), |
1368 | + call( |
1369 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/7'" |
1370 | + ), |
1371 | + call(" juju config nova-compute-kvm openstack-origin=distro"), |
1372 | + call(" sleep 120; jw"), |
1373 | + call(" juju upgrade-series 16 complete"), |
1374 | + call(" juju upgrade-series 10 prepare bionic -y"), |
1375 | + call(" juju scp local 10:"), |
1376 | + call( |
1377 | + " juju run --machine 10 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1378 | + ), |
1379 | + call( |
1380 | + " juju run --machine 10 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1381 | + ), |
1382 | + call(" juju run --machine 10 --timeout=5m 'sudo init 6'"), |
1383 | + call( |
1384 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/1'" |
1385 | + ), |
1386 | + call(" juju upgrade-series 10 complete"), |
1387 | + call(" juju upgrade-series 19 prepare bionic -y"), |
1388 | + call(" juju scp local 19:"), |
1389 | + call( |
1390 | + " juju run --machine 19 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1391 | + ), |
1392 | + call( |
1393 | + " juju run --machine 19 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1394 | + ), |
1395 | + call(" juju run --machine 19 --timeout=5m 'sudo init 6'"), |
1396 | + call( |
1397 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/10'" |
1398 | + ), |
1399 | + call(" juju upgrade-series 19 complete"), |
1400 | + call(" juju upgrade-series 20 prepare bionic -y"), |
1401 | + call(" juju scp local 20:"), |
1402 | + call( |
1403 | + " juju run --machine 20 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1404 | + ), |
1405 | + call( |
1406 | + " juju run --machine 20 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1407 | + ), |
1408 | + call(" juju run --machine 20 --timeout=5m 'sudo init 6'"), |
1409 | + call( |
1410 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/11'" |
1411 | + ), |
1412 | + call(" juju upgrade-series 20 complete"), |
1413 | + call(" juju upgrade-series 21 prepare bionic -y"), |
1414 | + call(" juju scp local 21:"), |
1415 | + call( |
1416 | + " juju run --machine 21 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1417 | + ), |
1418 | + call( |
1419 | + " juju run --machine 21 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1420 | + ), |
1421 | + call(" juju run --machine 21 --timeout=5m 'sudo init 6'"), |
1422 | + call( |
1423 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/12'" |
1424 | + ), |
1425 | + call(" juju upgrade-series 21 complete"), |
1426 | + call(" juju upgrade-series 22 prepare bionic -y"), |
1427 | + call(" juju scp local 22:"), |
1428 | + call( |
1429 | + " juju run --machine 22 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1430 | + ), |
1431 | + call( |
1432 | + " juju run --machine 22 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1433 | + ), |
1434 | + call(" juju run --machine 22 --timeout=5m 'sudo init 6'"), |
1435 | + call( |
1436 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/13'" |
1437 | + ), |
1438 | + call(" juju upgrade-series 22 complete"), |
1439 | + call(" juju upgrade-series 18 prepare bionic -y"), |
1440 | + call(" juju scp local 18:"), |
1441 | + call( |
1442 | + " juju run --machine 18 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" |
1443 | + ), |
1444 | + call( |
1445 | + " juju run --machine 18 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" |
1446 | + ), |
1447 | + call(" juju run --machine 18 --timeout=5m 'sudo init 6'"), |
1448 | + call( |
1449 | + " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/9'" |
1450 | + ), |
1451 | + call(" juju upgrade-series 18 complete"), |
1452 | + call("\n"), |
1453 | + call(u.POST_UPGRADE_STEPS), |
1454 | + ] |
1455 | + |
1456 | + assert mock_stdout.mock_calls == calls |
This merge proposal is being monitored by mergebot. Change the status to Approved to merge.