Merge ~smigiel-dariusz/cloud-upgrade-planner:pre_integration into cloud-upgrade-planner:master
- Git
- lp:~smigiel-dariusz/cloud-upgrade-planner
- pre_integration
- Merge into master
Proposed by
Dariusz Smigiel
Status: | Merged |
---|---|
Approved by: | James Troup |
Approved revision: | 3a2068a958d16e398e45fcf0a1cde80e16d137b5 |
Merged at revision: | bf1c391b152991cfca9aa6311e471e48302c7f1d |
Proposed branch: | ~smigiel-dariusz/cloud-upgrade-planner:pre_integration |
Merge into: | cloud-upgrade-planner:master |
Diff against target: |
1456 lines (+1412/-0) 5 files modified
cloud_upgrade_planner/common.py (+76/-0) cloud_upgrade_planner/openstack_managed_upgrade.py (+278/-0) cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py (+328/-0) setup.py (+2/-0) tests/test_ubuntu_series_upgrade_for_openstack.py (+728/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
BootStack Reviewers | Pending | ||
BootStack Reviewers | Pending | ||
Review via email:
|
Commit message
Migration of bootstack-tools to cloud-upgrade-
Description of the change
To post a comment you must log in.
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
James Troup (elmo) wrote : | # |
I'll merge this, but there are some things that need to be fixed in follow-ups please:
1) assumption of /home/ubuntu
2) reference to Canonical Wiki
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Change successfully merged at revision bf1c391b152991c
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloud_upgrade_planner/cli.py b/cloud_upgrade_planner/cli.py | |||
2 | 0 | old mode 100755 | 0 | old mode 100755 |
3 | 1 | new mode 100644 | 1 | new mode 100644 |
4 | diff --git a/cloud_upgrade_planner/common.py b/cloud_upgrade_planner/common.py | |||
5 | 2 | new file mode 100644 | 2 | new file mode 100644 |
6 | index 0000000..a31016e | |||
7 | --- /dev/null | |||
8 | +++ b/cloud_upgrade_planner/common.py | |||
9 | @@ -0,0 +1,76 @@ | |||
10 | 1 | import re | ||
11 | 2 | import sys | ||
12 | 3 | import yaml | ||
13 | 4 | |||
14 | 5 | |||
def find_units_for_app(app, model_apps):
    """Return the unit names belonging to *app* in the juju status dict."""
    units = model_apps[app]["units"]
    return [unit for unit in units]
17 | 8 | |||
18 | 9 | |||
def find_leader_for_app(app, model_apps):
    """Return the unit of *app* flagged as leader, or None when no unit is."""
    for unit_name, unit_data in model_apps[app]["units"].items():
        if "leader" in unit_data:
            return unit_name
    return None
23 | 14 | |||
24 | 15 | |||
def find_charms_apps_in_model(charms, model_apps, svc2charm):
    """Return all applications deployed from any charm in *charms*.

    Results are grouped in the order the charms appear in *charms*.
    """
    return [app
            for charm in charms
            for app in model_apps
            if svc2charm[app] == charm]
31 | 22 | |||
32 | 23 | |||
def get_model_apps(status_file):
    """Load the 'applications' section of a saved juju status.

    *status_file* is a YAML file created with 'juju status --format yaml'.
    Exits the process with status 1 when the file cannot be read.
    """
    try:
        with open(status_file) as status_yaml:
            return yaml.safe_load(status_yaml)["applications"]
    except IOError:
        # Write the diagnostic to stderr so the generated playbook on
        # stdout is not polluted with it.
        print("Please run a fresh copy of juju status --format yaml > jsfy",
              file=sys.stderr)
        sys.exit(1)
40 | 31 | |||
41 | 32 | |||
def find_apps_from_charm(charm, model_apps):
    """Return applications whose charm URL resolves to *charm*.

    Matches local: or cs: prefixes, an optional ~user/ or series/
    namespace, then the charm name and its numeric revision.
    """
    # Raw string avoids the invalid-escape warning on \d, and re.escape
    # keeps any regex metacharacters in the charm name from corrupting
    # the pattern.
    regex = re.compile(r"(local:|cs:)(~*.*/)*{}-\d+".format(re.escape(charm)))
    return [app for app in model_apps.keys()
            if re.match(regex, model_apps[app]['charm']) is not None]
47 | 38 | |||
48 | 39 | |||
def find_machine_from_unit(app, unit, model_apps):
    """Return the machine id hosting *unit* of *app*."""
    unit_data = model_apps[app]["units"][unit]
    return unit_data["machine"]
51 | 42 | |||
52 | 43 | |||
def render_app_to_charm_dict(model_apps):
    """Map each application name to the bare charm name it deploys.

    Charm URLs match local: or cs: + ~user/charm, series/charm, or charm.
    Regex groups:
      1: store prefix (local:|cs:)
      2: ~namespace/ or series/ (possibly empty)
      3: charm name (revision number stripped)
    """
    pattern = re.compile(r"(local:|cs:)(~*.*/|)(.*)-\d+")
    return {app: pattern.match(data["charm"]).group(3)
            for app, data in model_apps.items()}
65 | 56 | |||
66 | 57 | |||
def available_upgrade(app, model_apps):
    """Return True when juju reports a newer charm revision for *app*.

    When no upgrade is available, a playbook comment naming the running
    charm is printed and False is returned.
    """
    app_data = model_apps[app]
    if 'can-upgrade-to' not in app_data:
        print("    # No charm upgrade available for {}. "
              "Running: {}".format(app, app_data["charm"]))
        return False
    return True
73 | 64 | |||
74 | 65 | |||
def find_hacluster_for_unit(app, unit, model_apps):
    """Return the hacluster subordinate unit attached to *unit*, if any.

    Returns None when the unit has no subordinates or none of them is
    deployed from an hacluster charm.
    """
    unit_data = model_apps[app]['units'][unit]
    if 'subordinates' not in unit_data:
        return None
    # Match local: or cs: + optional ~user/ or series/ namespace, then
    # the hacluster charm name and revision. Raw string avoids the
    # invalid-escape warning on \d.
    regex = re.compile(r"(local:|cs:)(~*.*/)*hacluster-\d+")
    for subunit in unit_data['subordinates'].keys():
        subunit_app = subunit.split("/")[0]
        if re.match(regex, model_apps[subunit_app]['charm']) is not None:
            return subunit
    return None
85 | 76 | |||
86 | diff --git a/cloud_upgrade_planner/openstack_managed_upgrade.py b/cloud_upgrade_planner/openstack_managed_upgrade.py | |||
87 | 0 | new file mode 100644 | 77 | new file mode 100644 |
88 | index 0000000..4b4960e | |||
89 | --- /dev/null | |||
90 | +++ b/cloud_upgrade_planner/openstack_managed_upgrade.py | |||
91 | @@ -0,0 +1,278 @@ | |||
92 | 1 | #!/usr/bin/python3 | ||
93 | 2 | |||
94 | 3 | # requires file in current directory named "jsfy" created with | ||
95 | 4 | # juju status --format yaml > jsfy | ||
96 | 5 | # (version of juju must support leadership in yaml output) | ||
97 | 6 | |||
98 | 7 | # Assumptions | ||
99 | 8 | # we assume you're deploying either cs:charmname or cs:~namespace/charmname | ||
100 | 9 | # or local:<series>/<charm> versions of charms, and does not support | ||
101 | 10 | # differently-named charms. | ||
102 | 11 | |||
103 | 12 | # set this to the destination openstack-origin of your choice | ||
104 | 13 | # TODO: | ||
105 | 14 | # include optional unit pauses for pure clean HA | ||
106 | 15 | # include evacuating and monitoring each nova compute node | ||
107 | 16 | # include neutron-gateway HA router migrations if pausing | ||
108 | 17 | # include gnocchi/aodh/octavia/placement installs | ||
109 | 18 | |||
110 | 19 | import argparse | ||
111 | 20 | import re | ||
112 | 21 | import sys | ||
113 | 22 | |||
114 | 23 | from cloud_upgrade_planner import common | ||
115 | 24 | |||
116 | 25 | # The phases of the update. After any of these phases, updates can be paused | ||
117 | 26 | # Each phase is recommended to take a day, with phases 3 and 4 being combined | ||
118 | 27 | PHASE_APPS = {0: ["openstack-service-checks", | ||
119 | 28 | "percona-cluster", | ||
120 | 29 | "rabbitmq-server"], | ||
121 | 30 | 1: ["keystone", | ||
122 | 31 | "ceph-mon"], | ||
123 | 32 | 2: ["ceph-fs", | ||
124 | 33 | "ceph-radosgw", | ||
125 | 34 | "swift-proxy", | ||
126 | 35 | "swift-storage", | ||
127 | 36 | "cinder", | ||
128 | 37 | "cinder-ceph", | ||
129 | 38 | "cinder-backup", | ||
130 | 39 | "barbican", | ||
131 | 40 | "glance", | ||
132 | 41 | "aodh", | ||
133 | 42 | "gnocchi", | ||
134 | 43 | "ceilometer", | ||
135 | 44 | "designate", | ||
136 | 45 | "designate-bind", | ||
137 | 46 | "neutron-api", | ||
138 | 47 | "nova-cloud-controller", | ||
139 | 48 | "heat", | ||
140 | 49 | "manila", | ||
141 | 50 | "manila-generic", | ||
142 | 51 | "openstack-dashboard", | ||
143 | 52 | "placement", | ||
144 | 53 | "octavia", | ||
145 | 54 | "easyrsa", | ||
146 | 55 | "ovn-central"], | ||
147 | 56 | 3: ["neutron-gateway", | ||
148 | 57 | "nova-compute", | ||
149 | 58 | "neutron-openvswitch", | ||
150 | 59 | "ovn-chassis", | ||
151 | 60 | "ceph-osd"]} | ||
152 | 61 | |||
153 | 62 | ACTION_MANAGED_PHASES = [1, 3] | ||
154 | 63 | DEFERRED_WAIT_PHASES = [2] | ||
155 | 64 | |||
156 | 65 | # These charms use source: instead of openstack-origin: config flags | ||
157 | 66 | ORIGIN_VAR_IS_SOURCE = ["ceph-osd", | ||
158 | 67 | "ceph-mon", | ||
159 | 68 | "ceph-radosgw", | ||
160 | 69 | "percona-cluster", | ||
161 | 70 | "rabbitmq-server"] | ||
162 | 71 | |||
163 | 72 | # These charms do not have openstack-origin/source config flags | ||
164 | 73 | CHARM_UPGRADES_ONLY = ["neutron-openvswitch", | ||
165 | 74 | "ovn-chassis", | ||
166 | 75 | "memcached", | ||
167 | 76 | "mongodb", | ||
168 | 77 | "graylog", | ||
169 | 78 | "prometheus", | ||
170 | 79 | "openstack-service-checks", | ||
171 | 80 | "cinder-ceph", | ||
172 | 81 | "cinder-backup", | ||
173 | 82 | "easyrsa", | ||
174 | 83 | "designate-bind"] | ||
175 | 84 | |||
176 | 85 | # Enable deferred restarts on these charms as they are upgraded | ||
177 | 86 | DEFERRED_RESTART_CHARMS = [ | ||
178 | 87 | "neutron-gateway", | ||
179 | 88 | "neutron-openvswitch", | ||
180 | 89 | "ovn-central", | ||
181 | 90 | "ovn-chassis", | ||
182 | 91 | "ovn-dedicated-chassis", | ||
183 | 92 | "rabbitmq-server", | ||
184 | 93 | ] | ||
185 | 94 | |||
186 | 95 | # TODO: When upgrading to these versions, add step to install these charms | ||
187 | 96 | # between Phase 1 and Phase 2 | ||
188 | 97 | NEW_CHARMS_FOR_VERSIONS = {"train": ["placement"], | ||
189 | 98 | "queens": ["octavia"], | ||
190 | 99 | "ocata": ["aodh", "gnocchi"]} | ||
191 | 100 | |||
192 | 101 | # This can be used both for required --switch changes for 19.04+ or can be | ||
193 | 102 | # populated with specific version URLs | ||
194 | 103 | # e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be | ||
195 | 104 | # switched to cs:~openstack-charmers/cinder-backup | ||
196 | 105 | # Anything not called out with these paths will be switched to cs:<charmname> | ||
197 | 106 | # if currently running cs:~*/<charmname> or local:<charmname> | ||
198 | 107 | CHARMSTORE_TILDE_PATHS = { | ||
199 | 108 | "cinder-backup": "cs:~openstack-charmers/cinder-backup", | ||
200 | 109 | "easyrsa": "cs:~containers/easyrsa", | ||
201 | 110 | "policy-routing": "cs:~canonical-bootstack/policy-routing", | ||
202 | 111 | "infra-node": "cs:~canonical-bootstack/infra-node", | ||
203 | 112 | } | ||
204 | 113 | |||
205 | 114 | JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing" | ||
206 | 115 | WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES) | ||
207 | 116 | |||
208 | 117 | PRE_UPGRADE_STEPS = """ | ||
209 | 118 | This playbook assumes all applications are running an identical openstack-origin/source of the prior version openstack. | ||
210 | 119 | You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify the first and last apps are at the same level. | ||
211 | 120 | |||
212 | 121 | ### IMPORTANT ### | ||
213 | 122 | Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders | ||
214 | 123 | |||
215 | 124 | Please read the wiki for openstack upgrades for known issues and read the upstream charm guide release notes and issues. | ||
216 | 125 | https://wiki.canonical.com/CDO/IS/Bootstack/Playbooks/OpenstackReleaseUpgradeActionManaged | ||
217 | 126 | https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html | ||
218 | 127 | |||
219 | 128 | There may be several things that need to be updated before changing to a new version of openstack, such as | ||
220 | 129 | setting keystone tokens to fernet before upgrade to Rocky, | ||
221 | 130 | ensuring that the ceph minimum_osd_version is set for the current version of ceph before upgrade, | ||
222 | 131 | before upgrading to train, ensure that you've installed octavia and have migrated loadbalancers, as lbaasv2 is deprecated. | ||
223 | 132 | |||
224 | 133 | See the referenced docs for these steps. | ||
225 | 134 | |||
226 | 135 | After any upgrades, please check status of services via nagios/thruk to ensure you're not exhibiting new alerts. | ||
227 | 136 | |||
228 | 137 | This upgrade guide does not cover router or VM migrations to avoid data-plane outages during package upgrades. | ||
229 | 138 | Please be sure to plan accordingly based on customer request. | ||
230 | 139 | |||
231 | 140 | All charms should be at the latest version before upgrading openstack. | ||
232 | 141 | """ | ||
233 | 142 | |||
234 | 143 | POST_UPGRADE_STEPS = """ | ||
235 | 144 | |||
236 | 145 | # POST UPGRADE ACTIONS | ||
237 | 146 | Perform these steps after the cloud has completed upgrades | ||
238 | 147 | |||
239 | 148 | Validate Nagios is clean and any disabled nova-compute services have been re-enabled. | ||
240 | 149 | |||
241 | 150 | Update ceph minimum_osd_version per docs: | ||
242 | 151 | https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html#ceph-option-require-osd-release | ||
243 | 152 | """ | ||
244 | 153 | |||
245 | 154 | |||
def plan_charm_upgrade(app, model_apps, svc2charm):
    """Print the juju commands needed to upgrade the charm behind *app*.

    Decides whether upgrade-charm needs a --switch (local or branched
    charms are pointed back at the promulgated cs: charm, or at an
    explicit CHARMSTORE_TILDE_PATHS override).  For charms in
    DEFERRED_RESTART_CHARMS a per-app config file disabling automatic
    restarts is written and passed to the upgrade (or applied via
    'juju config' when no charm upgrade is available).
    """
    switch_args, config_filename = "", ""
    charm = svc2charm[app]
    current_charm = model_apps[app]['charm']
    # strip version number (eg: cs:~foo/my-charm-123 becomes cs:~foo/my-charm)
    current_charm_path = current_charm.rsplit("-", 1)[0]
    target_tilde = CHARMSTORE_TILDE_PATHS.get(charm, None)

    if "local:" in current_charm_path and not target_tilde:
        # Provide warning to user that local charm was found and needs sanity check
        print("""
WARNING!!! Application {app} has local charm path {current_charm}
Suggesting switch to latest promulgated cs: version.
Please ensure updates in local charm are in upstream charm before
running upgrade-charm on this application.
""".format(app=app, current_charm=current_charm))
        switch_args += " --switch cs:{}".format(charm)
    elif target_tilde and current_charm_path != target_tilde:
        # A known-good explicit charm store path exists and differs from
        # what is currently deployed: switch to it.
        switch_args += " --switch {}".format(target_tilde)
    elif "cs:~" in current_charm_path or "/" in current_charm_path:
        # Branched/namespaced charm with no override configured: fall
        # back to the promulgated cs:<charm>.
        print("""
WARNING!!! Application {app} from unrecognized path {current_charm}
Suggesting switch to latest promulgated cs: version.
Please ensure updates in the above branched charm are in the latest
promulgated charm before running upgrade-charm on this application.
""".format(app=app, current_charm=current_charm))
        switch_args += " --switch {}".format("cs:{}".format(charm))

    if charm in DEFERRED_RESTART_CHARMS:
        config_filename = "deferred_restart_config_{}.yaml".format(app)
        # Format of config file per 'juju deploy --help' doc
        # application-name:
        #   option1: value1
        #   option2: value2
        with open(config_filename, "w") as conf:
            print("{}:\n enable-auto-restarts: False".format(app), file=conf)

    if switch_args or common.available_upgrade(app, model_apps):
        config_args = ' --config {}'.format(config_filename) if config_filename else ""
        print(" juju upgrade-charm {}{}{}".format(app, switch_args, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
    elif config_filename:
        # No charm upgrade available, but the deferred-restart config
        # still needs applying via 'juju config'.
        config_args = ' --file {}'.format(config_filename) if config_filename else ""
        print(" juju config {}{}".format(app, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
291 | 200 | |||
292 | 201 | |||
def plan_action_managed_phase_app(app, model_apps, release):
    """Print the action-managed upgrade steps for *app*, leader first.

    Configures action-managed-upgrade=true plus the target
    openstack-origin, then emits one openstack-upgrade action per unit,
    upgrading the leader before the remaining units as the OpenStack
    charms require.
    """
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    units.remove(leader)
    # Leader must be upgraded before the non-leader units.
    upgradeorder = [leader]
    upgradeorder.extend(units)
    print(" juju config {} "
          "action-managed-upgrade=true "
          "openstack-origin={}".format(app, release))
    print(" {}".format(WATCH_COMMAND))
    for unit in upgradeorder:
        print(" juju run-action --wait {} "
              "openstack-upgrade".format(unit))
307 | 216 | |||
308 | 217 | |||
def parse_args(argv):
    """Process CLI arguments.

    Returns an argparse.Namespace with status_file, charms_only and
    release attributes.
    """
    parser = argparse.ArgumentParser(
        prog="openstack_managed_upgrade.py",
        # NOTE: trailing spaces keep the implicitly concatenated string
        # literals from running words together in --help output.
        description=(
            "this program uses the output from juju status --format yaml "
            "and creates a playbook of juju upgrade actions to upgrade the "
            "version of Openstack to the target version"
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-f", "--status-file", dest="status_file", type=str,
                        default="jsfy",
                        help="file containing 'juju status --format yaml' output")
    parser.add_argument("-c", "--charms-only", dest="charms_only",
                        action="store_true",
                        help="Only plan charm upgrades")
    parser.add_argument("release", type=str,
                        help="Set release to UCA target for upgrade such as: 'cloud:bionic-ussuri'")
    return parser.parse_args(argv)
329 | 238 | |||
330 | 239 | |||
def main(argv=None):
    """Emit the full upgrade playbook, phase by phase, to stdout.

    Charm upgrade commands are always printed; the openstack-origin /
    source bumps are skipped when --charms-only is given.  Phases in
    DEFERRED_WAIT_PHASES print a single watch command after the whole
    phase instead of one per application.
    """
    args = parse_args(argv)
    model_apps = common.get_model_apps(args.status_file)
    svc2charm = common.render_app_to_charm_dict(model_apps)
    print(PRE_UPGRADE_STEPS)
    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        this_phase_apps = common.find_charms_apps_in_model(PHASE_APPS[phase],
                                                          model_apps, svc2charm)
        for app in this_phase_apps:
            # skip landscape upgrades
            if "landscape" in app:
                continue
            print(" App: {}".format(app))
            print(" Charm: {}".format(svc2charm[app]))
            print(" Commands:")
            plan_charm_upgrade(app, model_apps, svc2charm)
            if args.charms_only:
                continue
            if svc2charm[app] in CHARM_UPGRADES_ONLY:
                # no openstack repo upgrade for these charms
                continue
            if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
                # not possible to be action-managed-upgrade
                print(" juju config {} source={}".format(app, args.release))
            elif phase in ACTION_MANAGED_PHASES:
                plan_action_managed_phase_app(app, model_apps, args.release)
            else:
                print(" juju config {} openstack-origin={}"
                      " action-managed-upgrade=false".format(app, args.release))
            if phase not in DEFERRED_WAIT_PHASES:
                print(" {}\n".format(WATCH_COMMAND))
        if not args.charms_only and phase in DEFERRED_WAIT_PHASES:
            # Deferred phases wait once at the end rather than per app.
            print("\n {}\n".format(WATCH_COMMAND))

    print(POST_UPGRADE_STEPS)


if __name__ == "__main__":
    main(sys.argv[1:])
370 | diff --git a/cloud_upgrade_planner/plan.py b/cloud_upgrade_planner/plan.py | |||
371 | 0 | old mode 100755 | 279 | old mode 100755 |
372 | 1 | new mode 100644 | 280 | new mode 100644 |
373 | diff --git a/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py | |||
374 | 2 | new file mode 100644 | 281 | new file mode 100644 |
375 | index 0000000..8db72c1 | |||
376 | --- /dev/null | |||
377 | +++ b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py | |||
378 | @@ -0,0 +1,328 @@ | |||
379 | 1 | #!/usr/bin/python3 | ||
380 | 2 | |||
381 | 3 | # requires file in current directory named "jsfy" created with | ||
382 | 4 | # juju status --format yaml > jsfy | ||
383 | 5 | # (version of juju must support leadership in yaml output) | ||
384 | 6 | |||
385 | 7 | # Assumptions | ||
386 | 8 | # we assume you're deploying either cs:charmname or cs:~namespace/charmname | ||
387 | 9 | # or local:<series>/<charm> versions of charms, and does not support | ||
388 | 10 | # differently-named charms. | ||
389 | 11 | |||
390 | 12 | # set this to the destination openstack-origin of your choice | ||
391 | 13 | # TODO: add in parseargs | ||
392 | 14 | # include stepped version callouts | ||
393 | 15 | # include evacuating and monitoring each nova compute node | ||
394 | 16 | # include neutron-gateway HA router migrations if pausing | ||
395 | 17 | |||
396 | 18 | import re | ||
397 | 19 | import sys | ||
398 | 20 | import yaml | ||
399 | 21 | |||
400 | 22 | from cloud_upgrade_planner import common | ||
401 | 23 | |||
402 | 24 | # Set this to your target platform ### See TODOs | ||
403 | 25 | UCA = "distro" | ||
404 | 26 | series = "bionic" | ||
405 | 27 | svc2charm = {} | ||
406 | 28 | |||
407 | 29 | # The phases of the update. After any of these phases, updates can be paused | ||
408 | 30 | # Each phase is recommended to take a day, with phases 3 and 4 being combined | ||
409 | 31 | PHASE_APPS = {0: ["percona-cluster", | ||
410 | 32 | "rabbitmq-server", | ||
411 | 33 | "ceph-mon", | ||
412 | 34 | "ovn-central", | ||
413 | 35 | "etcd", | ||
414 | 36 | "vault"], | ||
415 | 37 | 1: ["keystone", | ||
416 | 38 | "ceph-fs", | ||
417 | 39 | "ceph-radosgw", | ||
418 | 40 | "swift-proxy", | ||
419 | 41 | "swift-storage", | ||
420 | 42 | "cinder", | ||
421 | 43 | "barbican", | ||
422 | 44 | "glance", | ||
423 | 45 | "aodh", | ||
424 | 46 | "gnocchi", | ||
425 | 47 | "ceilometer", | ||
426 | 48 | "designate", | ||
427 | 49 | "designate-bind", | ||
428 | 50 | "neutron-api", | ||
429 | 51 | "nova-cloud-controller", | ||
430 | 52 | "heat", | ||
431 | 53 | "manila", | ||
432 | 54 | "manila-generic", | ||
433 | 55 | "openstack-dashboard", | ||
434 | 56 | "placement", | ||
435 | 57 | "octavia", | ||
436 | 58 | "memcached", | ||
437 | 59 | "easyrsa"], | ||
438 | 60 | 2: ["nova-compute", | ||
439 | 61 | "neutron-gateway", | ||
440 | 62 | "ceph-osd"], | ||
441 | 63 | } | ||
442 | 64 | |||
443 | 65 | # These charms use source: instead of openstack-origin: config flags | ||
444 | 66 | ORIGIN_VAR_IS_SOURCE = ["ceph-osd", | ||
445 | 67 | "ceph-mon", | ||
446 | 68 | "ceph-fs", | ||
447 | 69 | "ceph-radosgw", | ||
448 | 70 | "percona-cluster", | ||
449 | 71 | "rabbitmq-server"] | ||
450 | 72 | |||
451 | 73 | # These charms only need the OS upgraded, no UCA tracking | ||
452 | 74 | NO_ORIGIN = ["designate-bind", "vault"] | ||
453 | 75 | |||
454 | 76 | # These charms do not have openstack-origin/source config flags | ||
455 | 77 | CHARM_UPGRADES_ONLY = ["neutron-openvswitch", | ||
456 | 78 | "memcached", | ||
457 | 79 | "mongodb", | ||
458 | 80 | "graylog", | ||
459 | 81 | "prometheus", | ||
460 | 82 | "openstack-service-checks", | ||
461 | 83 | "cinder-ceph", | ||
462 | 84 | "cinder-backup"] | ||
463 | 85 | |||
464 | 86 | NO_PAUSE_APPS = ["ceph-mon", | ||
465 | 87 | "ceph-osd", | ||
466 | 88 | "neutron-gateway", | ||
467 | 89 | "nova-compute"] | ||
468 | 90 | |||
469 | 91 | POST_APP_UPGRADE_ACTIONS_FOR_LEADERS = { | ||
470 | 92 | "percona-cluster": "complete-cluster-series-upgrade", | ||
471 | 93 | "rabbitmq-server": "complete-cluster-series-upgrade", | ||
472 | 94 | "ceilometer": "ceilometer-upgrade", | ||
473 | 95 | } | ||
474 | 96 | |||
475 | 97 | POST_APP_UPGRADE_MANUAL_STEPS = { | ||
476 | 98 | "vault": "You will need to unseal the vault after each unit reboots", | ||
477 | 99 | } | ||
478 | 100 | # This can be used both for required --switch changes for 19.04+ or can be | ||
479 | 101 | # populated with specific version URLs | ||
480 | 102 | # e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be | ||
481 | 103 | # switched to cs:~openstack-charmers/cinder-backup | ||
482 | 104 | CHARMSTORE_PATHS = {"cinder-backup": "cs:~openstack-charmers/cinder-backup"} | ||
483 | 105 | JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing" | ||
484 | 106 | WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES) | ||
485 | 107 | |||
486 | 108 | PRE_UPGRADE_STEPS = """ | ||
487 | 109 | This playbook assumes your current openstack revision is at the highest level of the current ubuntu series: | ||
488 | 110 | trusty-mitaka | ||
489 | 111 | xenial-queens | ||
490 | 112 | bionic-ussuri | ||
491 | 113 | You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify. | ||
492 | 114 | |||
493 | 115 | ### IMPORTANT ### | ||
494 | 116 | Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders | ||
495 | 117 | |||
496 | 118 | Perform these steps to prepare your environment/directory for the remaining playbook. | ||
497 | 119 | |||
498 | 120 | # Login to MAAS WebUI and add the new series to MAAS under the Images tab. | ||
499 | 121 | |||
500 | 122 | # Create "local" file for setting your apt preferences - Required for DRU to be non-interactive | ||
501 | 123 | cat > local << EOF | ||
502 | 124 | DPkg::options { "--force-confdef"; "--force-confnew"; } | ||
503 | 125 | EOF | ||
504 | 126 | |||
505 | 127 | # update the default-series of the model to the new series | ||
506 | 128 | juju model-config default-series=bionic # or focal | ||
507 | 129 | |||
508 | 130 | """ | ||
509 | 131 | |||
510 | 132 | POST_UPGRADE_STEPS = """ | ||
511 | 133 | |||
512 | 134 | # POST UPGRADE ACTIONS | ||
513 | 135 | Perform these steps after the cloud has completed upgrades | ||
514 | 136 | |||
515 | 137 | # Remove the "local" file added to apt configs for DRU, as it's not correct for typical upgrades | ||
516 | 138 | juju run --all 'sudo rm /etc/apt/apt.conf.d/local' | ||
517 | 139 | |||
518 | 140 | N.B. Do not upgrade Nagios from xenial to bionic until all cloud nodes have completed upgrade to bionic. | ||
519 | 141 | |||
520 | 142 | Upgrade all remaining machines (typically LMA stack) not previously captured: | ||
521 | 143 | # find the units with: | ||
522 | 144 | juju machines|grep <previous release|xenial|bionic|etc> | ||
523 | 145 | |||
524 | 146 | # Check each of those machines that they're not already upgraded by listing /etc/os-release file | ||
525 | 147 | juju run --machine <comma,separated,list,of,machines,left,at,old,version> 'hostname; grep VERSION_CODENAME /etc/os-release' | ||
526 | 148 | |||
527 | 149 | # perform steps similar to above for the remaining machines/applications. | ||
528 | 150 | """ | ||
529 | 151 | |||
530 | 152 | |||
def plan_parallel(model_apps, parallel_apps, machines_planned):
    """Print a parallel series-upgrade plan for *parallel_apps*.

    Builds ordered task lists (pause, prepare, dist-upgrade, reboot,
    complete, cleanup) across all apps so machines can be upgraded in
    parallel, then prints them grouped by stage.  *machines_planned* is
    mutated in place so machines shared between apps are only upgraded
    once across calls.  Relies on module globals series, UCA and
    svc2charm being populated by main().
    """
    cluster_pause_tasks = []
    pause_tasks = []
    leader_prepare_tasks = []
    non_leader_prepare_tasks = []
    upgrade_machines = []
    post_upgrade_tasks = []
    leader_complete_tasks = []
    non_leader_complete_tasks = []
    cleanup_tasks = []
    for app in parallel_apps:
        print(" juju set-series {} {}".format(app, series))
        leader = common.find_leader_for_app(app, model_apps)
        units = common.find_units_for_app(app, model_apps)
        units.remove(leader)

        # App-level cleanup: post-upgrade leader actions (e.g.
        # complete-cluster-series-upgrade) and any manual operator notes.
        post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
        manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
        if post_leader_action:
            cleanup_tasks.append(" juju run-action --wait {} {}".format(leader, post_leader_action))
        if manual_steps:
            cleanup_tasks.append(" ## Manual step: {}".format(manual_steps))

        for unit in units:
            unit_machine = common.find_machine_from_unit(app, unit, model_apps)
            if unit_machine in machines_planned:
                # CONSIDER: Note that we decided to skip dist-upgrade since it was already done?
                continue
            hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
            if hacluster_unit is not None:
                cluster_pause_tasks.append(" juju run-action --wait {} pause".format(hacluster_unit))
            if svc2charm[app] not in NO_PAUSE_APPS:
                pause_tasks.append(" juju run-action --wait {} pause".format(unit))
            non_leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(unit_machine, series, unit))
            non_leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(unit_machine, unit))
            upgrade_machines.append(unit_machine)
            machines_planned.append(unit_machine)

        leader_machine = common.find_machine_from_unit(app, leader, model_apps)
        if leader_machine in machines_planned:
            continue
        # upgrade leader unit's machine
        leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(leader_machine, series, leader))
        upgrade_machines.append(leader_machine)
        leader_complete_tasks.append(" echo 'Check status of workload for {}'".format(leader))
        # Bump the UCA origin once per app; charms in ORIGIN_VAR_IS_SOURCE
        # use 'source', NO_ORIGIN charms take no origin config at all.
        if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
            post_upgrade_tasks.append(" juju config {} source={}".format(app, UCA))
        elif svc2charm[app] not in NO_ORIGIN:
            post_upgrade_tasks.append(" juju config {} openstack-origin={}".format(app,UCA))
        leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(leader_machine, leader))
        machines_planned.append(leader_machine)
    # Emit the accumulated plan, stage by stage.
    print(" # Pause non-leaders")
    print("\n".join(cluster_pause_tasks))
    print("\n".join(pause_tasks))
    print(" # Prepare leaders - These can be done in parallel before non-leaders")
    print("\n".join(leader_prepare_tasks))
    print(" # Prepare non-leaders - These can be done in parallel after leaders")
    print("\n".join(non_leader_prepare_tasks))
    # Copy the apt "local" preferences file (created in PRE_UPGRADE_STEPS)
    # to every machine being upgraded.
    print(" for i in {}; do juju scp local $i: ; done".format(" ".join(upgrade_machines)))
    machine_spec = ",".join(upgrade_machines)
    print(" # Update all machines to latest patches")
    print(" ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step")
    print(" juju run --machine {} --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(machine_spec))
    print(" # Perform do-release-upgrade")
    print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(machine_spec))
    # NOTE(review): the next warning is printed three times, apparently
    # deliberate emphasis — confirm before deduplicating.
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" # Reboot all API machines into {}".format(series))
    print(" juju run --machine {} --timeout=10m 'sudo init 6'".format(machine_spec))
    print(" # Post Upgrade tasks")
    print("\n".join(post_upgrade_tasks))
    print(" # Complete leaders - These can all be done in parallel before non-leaders")
    print("\n".join(leader_complete_tasks))
    print(" # Complete non-leaders - These can all be done in parallel after leaders complete")
    print("\n".join(non_leader_complete_tasks))
    print(" # Post App Upgrade cleanup tasks")
    print("\n".join(cleanup_tasks))
    print("\n")
610 | 232 | |||
611 | 233 | |||
def main():
    """Print a complete Ubuntu series upgrade plan for an OpenStack model.

    Loads the exported juju status ("jsfy"), then walks the upgrade phases
    in ascending order: phase 1 applications (the API services) are planned
    in parallel, every other phase is planned one application at a time.
    """
    model_apps = common.get_model_apps("jsfy")
    # Machines whose dist-upgrade has already been emitted; shared across
    # phases so each machine is upgraded at most once in the plan.
    done_machines = []

    print(PRE_UPGRADE_STEPS)

    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        phase_apps = []
        # Expand each charm listed for this phase into the concrete
        # applications deployed from it, skipping anything landscape-related.
        for charm in PHASE_APPS[phase]:
            if 'landscape' in charm:
                continue
            apps = common.find_apps_from_charm(charm, model_apps)
            phase_apps.extend(apps)
            for found_app in apps:
                # Record application -> charm for later config/action lookups.
                svc2charm[found_app] = charm
        if phase == 1:
            plan_parallel(model_apps, phase_apps, done_machines)
        else:
            for found_app in phase_apps:
                plan_sequential(model_apps, found_app, done_machines)

    print(POST_UPGRADE_STEPS)
635 | 257 | |||
636 | 258 | |||
def plan_sequential(model_apps, app, machines_planned):
    """Emit the step-by-step series-upgrade plan for a single application.

    Prints (does not execute) the juju commands to upgrade the leader unit's
    machine first, then each non-leader unit's machine in turn. Machines
    already present in ``machines_planned`` only get the charm-level
    bookkeeping steps; the dist-upgrade/do-release-upgrade steps are skipped.

    :param model_apps: parsed ``applications`` mapping from juju status
    :param app: application name to plan
    :param machines_planned: mutable list of machine ids already planned;
        appended to as this function plans new machines
    """
    # Charm-specific follow-up work, if any, keyed by the charm behind *app*.
    post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
    manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
    # landscape* applications are excluded from this plan entirely.
    findlandscape = re.compile("landscape.*")
    if re.match(findlandscape, app) is not None:
        return
    print(" App: {}".format(app))
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    # Leave only the non-leader units in ``units``; the leader goes first.
    units.remove(leader)
    unit_list = " ".join(units)
    print(" Units {} {}".format(leader, unit_list))
    print(" {}".format(WATCH_COMMAND))
    print(" juju set-series {} {}".format(app, series))
    # Pause hacluster subordinates (if any) before pausing their principals.
    for unit in units:
        hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
        if hacluster_unit is not None:
            print(" juju run-action --wait {} pause".format(hacluster_unit))
    for unit in units:
        if svc2charm[app] not in NO_PAUSE_APPS:
            print(" juju run-action --wait {} pause".format(unit))
    # gate for machine already done
    leader_machine = common.find_machine_from_unit(app, leader, model_apps)
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        # upgrade leader unit's machine
        print(" juju upgrade-series {} prepare {} -y".format(leader_machine, series))
        # NOTE(review): the apt.conf snippet path assumes /home/ubuntu --
        # flagged during review to be fixed in a follow-up.
        print(" juju scp local {}:".format(leader_machine))
        print(
            " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                leader_machine))
        print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(leader_machine))
        print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(leader_machine))
    # "jw" is presumably an operator shell alias for watching juju status --
    # TODO confirm; see WATCH_COMMAND above.
    print(" sleep 120; jw; echo 'Check status of workload for {}'".format(leader))
    # Point the app at the target cloud archive; charms differ on whether the
    # option is called "source" or "openstack-origin".
    if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
        print(" juju config {} source={}".format(app, UCA))
    elif svc2charm[app] not in NO_ORIGIN:
        print(" juju config {} openstack-origin={}".format(app, UCA))
    print(" sleep 120; jw")
    # Second gate: only complete + record machines planned by this call.
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        print(" juju upgrade-series {} complete".format(leader_machine))
        machines_planned.append(leader_machine)
    # Now the non-leader units, one machine at a time.
    for unit in units:
        non_leader_machine = common.find_machine_from_unit(app, unit, model_apps)
        if non_leader_machine in machines_planned:
            print(" # Skipping dist-upgrade as already done prior for {}".format(non_leader_machine))
        else:
            print(" juju upgrade-series {} prepare {} -y".format(non_leader_machine, series))
            print(" juju scp local {}:".format(non_leader_machine))
            print(
                " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                    non_leader_machine))
            print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(
                non_leader_machine))
            print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(non_leader_machine))
            print(" sleep 120; jw; echo 'Check status of workload for {}'".format(unit))
            print(" juju upgrade-series {} complete".format(non_leader_machine))
            machines_planned.append(non_leader_machine)
    # Charm-specific post-upgrade action on the leader (e.g. cluster cleanup).
    if post_leader_action:
        print(" juju run-action --wait {} {}".format(leader, post_leader_action))
    if manual_steps:
        print(" ## Manual step: {}".format(manual_steps))
    print('\n')
703 | 325 | |||
704 | 326 | |||
# Script entry point when run directly (also exposed as a console_script).
if __name__ == "__main__":
    main()
707 | diff --git a/setup.py b/setup.py | |||
708 | index 8c310d4..a9ecfa7 100644 | |||
709 | --- a/setup.py | |||
710 | +++ b/setup.py | |||
711 | @@ -48,6 +48,8 @@ setuptools.setup( | |||
712 | 48 | "console_scripts": [ | 48 | "console_scripts": [ |
713 | 49 | "cloud-upgrade-planner=cloud_upgrade_planner.cli:main", | 49 | "cloud-upgrade-planner=cloud_upgrade_planner.cli:main", |
714 | 50 | "update-charm-revisions=cloud_upgrade_planner.cli:refresh", | 50 | "update-charm-revisions=cloud_upgrade_planner.cli:refresh", |
715 | 51 | "ubuntu-series-upgrade-for-openstack=cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack:main", | ||
716 | 52 | "openstack-managed-upgrade=cloud_upgrade_planner.openstack_managed_upgrade:main", | ||
717 | 51 | ] | 53 | ] |
718 | 52 | }, | 54 | }, |
719 | 53 | setup_requires=["setuptools_scm"], | 55 | setup_requires=["setuptools_scm"], |
720 | diff --git a/tests/test_cli.py b/tests/test_cli.py | |||
721 | 54 | old mode 100755 | 56 | old mode 100755 |
722 | 55 | new mode 100644 | 57 | new mode 100644 |
723 | diff --git a/tests/test_ubuntu_series_upgrade_for_openstack.py b/tests/test_ubuntu_series_upgrade_for_openstack.py | |||
724 | 56 | new file mode 100644 | 58 | new file mode 100644 |
725 | index 0000000..8d80437 | |||
726 | --- /dev/null | |||
727 | +++ b/tests/test_ubuntu_series_upgrade_for_openstack.py | |||
728 | @@ -0,0 +1,728 @@ | |||
729 | 1 | import pytest | ||
730 | 2 | from unittest.mock import patch, mock_open, call | ||
731 | 3 | import yaml | ||
732 | 4 | |||
733 | 5 | from cloud_upgrade_planner import common as c | ||
734 | 6 | from cloud_upgrade_planner import openstack_managed_upgrade as o | ||
735 | 7 | from cloud_upgrade_planner import ubuntu_series_upgrade_for_openstack as u | ||
736 | 8 | |||
737 | 9 | |||
738 | 10 | JUJU_FILE_MOCK = """ | ||
739 | 11 | applications: | ||
740 | 12 | aodh-mysql-router: | ||
741 | 13 | can-upgrade-to: cs:mysql-router-11 | ||
742 | 14 | charm: cs:mysql-router-6 | ||
743 | 15 | easyrsa: | ||
744 | 16 | can-upgrade-to: cs:~containers/easyrsa-408 | ||
745 | 17 | charm: cs:~containers/easyrsa-345 | ||
746 | 18 | units: | ||
747 | 19 | easyrsa/0: | ||
748 | 20 | leader: true | ||
749 | 21 | machine: 7/lxd/3 | ||
750 | 22 | hacluster-neutron: | ||
751 | 23 | can-upgrade-to: cs:hacluster-78 | ||
752 | 24 | charm: cs:hacluster-74 | ||
753 | 25 | hacluster-nova: | ||
754 | 26 | can-upgrade-to: cs:hacluster-78 | ||
755 | 27 | charm: cs:hacluster-74 | ||
756 | 28 | hacluster-vault: | ||
757 | 29 | can-upgrade-to: cs:hacluster-78 | ||
758 | 30 | charm: cs:hacluster-74 | ||
759 | 31 | landscape-client: | ||
760 | 32 | charm: cs:landscape-client-35 | ||
761 | 33 | juju-lint: | ||
762 | 34 | charm: local:focal/juju-lint-1 | ||
763 | 35 | units: | ||
764 | 36 | juju-lint/0: | ||
765 | 37 | leader: true | ||
766 | 38 | machine: 8/lxd/15 | ||
767 | 39 | memcached: | ||
768 | 40 | charm: cs:memcached-34 | ||
769 | 41 | units: | ||
770 | 42 | memcached/1: | ||
771 | 43 | leader: true | ||
772 | 44 | machine: 5/lxd/4 | ||
773 | 45 | mysql-innodb-cluster: | ||
774 | 46 | can-upgrade-to: cs:mysql-innodb-cluster-11 | ||
775 | 47 | charm: cs:mysql-innodb-cluster-5 | ||
776 | 48 | units: | ||
777 | 49 | mysql-innodb-cluster/0: | ||
778 | 50 | machine: 3/lxd/8 | ||
779 | 51 | subordinates: | ||
780 | 52 | landscape-client/65: true | ||
781 | 53 | mysql-innodb-cluster/1: | ||
782 | 54 | leader: true | ||
783 | 55 | machine: 4/lxd/8 | ||
784 | 56 | subordinates: | ||
785 | 57 | landscape-client/66: true | ||
786 | 58 | mysql-innodb-cluster/2: | ||
787 | 59 | machine: 5/lxd/8 | ||
788 | 60 | subordinates: | ||
789 | 61 | landscape-client/67: true | ||
790 | 62 | neutron-api: | ||
791 | 63 | can-upgrade-to: cs:neutron-api-299 | ||
792 | 64 | charm: cs:neutron-api-292 | ||
793 | 65 | units: | ||
794 | 66 | neutron-api/0: | ||
795 | 67 | machine: 6/lxd/5 | ||
796 | 68 | subordinates: | ||
797 | 69 | hacluster-neutron/0: true | ||
798 | 70 | landscape-client/69: true | ||
799 | 71 | neutron-api-mysql-router/0: true | ||
800 | 72 | neutron-api/1: | ||
801 | 73 | machine: 7/lxd/6 | ||
802 | 74 | subordinates: | ||
803 | 75 | hacluster-neutron/1: true | ||
804 | 76 | landscape-client/68: true | ||
805 | 77 | neutron-api-mysql-router/1: true | ||
806 | 78 | neutron-api/2: | ||
807 | 79 | leader: true | ||
808 | 80 | machine: 8/lxd/5 | ||
809 | 81 | subordinates: | ||
810 | 82 | hacluster-neutron/2: true | ||
811 | 83 | landscape-client/70: true | ||
812 | 84 | neutron-api-mysql-router/2: true | ||
813 | 85 | nova-cloud-controller: | ||
814 | 86 | can-upgrade-to: cs:nova-cloud-controller-358 | ||
815 | 87 | charm: cs:nova-cloud-controller-353 | ||
816 | 88 | units: | ||
817 | 89 | nova-cloud-controller/0: | ||
818 | 90 | machine: 6/lxd/6 | ||
819 | 91 | subordinates: | ||
820 | 92 | hacluster-nova/0: true | ||
821 | 93 | landscape-client/72: true | ||
822 | 94 | nova-cloud-controller-mysql-router/0: true | ||
823 | 95 | nova-cloud-controller/1: | ||
824 | 96 | machine: 7/lxd/7 | ||
825 | 97 | subordinates: | ||
826 | 98 | hacluster-nova/2: true | ||
827 | 99 | landscape-client/73: true | ||
828 | 100 | nova-cloud-controller-mysql-router/2: true | ||
829 | 101 | nova-cloud-controller/2: | ||
830 | 102 | leader: true | ||
831 | 103 | machine: 8/lxd/6 | ||
832 | 104 | subordinates: | ||
833 | 105 | hacluster-nova/1: true | ||
834 | 106 | landscape-client/71: true | ||
835 | 107 | nova-cloud-controller-mysql-router/1: true | ||
836 | 108 | nova-cloud-controller-mysql-router: | ||
837 | 109 | can-upgrade-to: cs:mysql-router-11 | ||
838 | 110 | charm: cs:mysql-router-6 | ||
839 | 111 | nova-compute-kvm: | ||
840 | 112 | can-upgrade-to: cs:nova-compute-334 | ||
841 | 113 | charm: cs:nova-compute-325 | ||
842 | 114 | units: | ||
843 | 115 | nova-compute-kvm/1: | ||
844 | 116 | machine: '10' | ||
845 | 117 | subordinates: | ||
846 | 118 | landscape-client/19: true | ||
847 | 119 | nova-compute-kvm/10: | ||
848 | 120 | machine: '19' | ||
849 | 121 | subordinates: | ||
850 | 122 | landscape-client/10: true | ||
851 | 123 | nova-compute-kvm/11: | ||
852 | 124 | machine: '20' | ||
853 | 125 | subordinates: | ||
854 | 126 | landscape-client/20: true | ||
855 | 127 | nova-compute-kvm/12: | ||
856 | 128 | machine: '21' | ||
857 | 129 | subordinates: | ||
858 | 130 | landscape-client/11: true | ||
859 | 131 | nova-compute-kvm/13: | ||
860 | 132 | machine: '22' | ||
861 | 133 | subordinates: | ||
862 | 134 | landscape-client/4: true | ||
863 | 135 | nova-compute-kvm/7: | ||
864 | 136 | leader: true | ||
865 | 137 | machine: '16' | ||
866 | 138 | subordinates: | ||
867 | 139 | landscape-client/7: true | ||
868 | 140 | nova-compute-kvm/9: | ||
869 | 141 | machine: '18' | ||
870 | 142 | subordinates: | ||
871 | 143 | landscape-client/16: true | ||
872 | 144 | rabbitmq-server: | ||
873 | 145 | can-upgrade-to: cs:~llama-charmers-next/rabbitmq-server-6 | ||
874 | 146 | charm: cs:~llama-charmers-next/rabbitmq-server-5 | ||
875 | 147 | units: | ||
876 | 148 | rabbitmq-server/4: | ||
877 | 149 | machine: 6/lxd/12 | ||
878 | 150 | rabbitmq-server/5: | ||
879 | 151 | leader: true | ||
880 | 152 | machine: 7/lxd/12 | ||
881 | 153 | rabbitmq-server/6: | ||
882 | 154 | machine: 8/lxd/11 | ||
883 | 155 | vault: | ||
884 | 156 | can-upgrade-to: cs:vault-50 | ||
885 | 157 | charm: cs:vault-44 | ||
886 | 158 | units: | ||
887 | 159 | vault/0: | ||
888 | 160 | machine: '0' | ||
889 | 161 | subordinates: | ||
890 | 162 | hacluster-vault/1: true | ||
891 | 163 | landscape-client/24: true | ||
892 | 164 | vault/1: | ||
893 | 165 | leader: true | ||
894 | 166 | machine: '1' | ||
895 | 167 | subordinates: | ||
896 | 168 | hacluster-vault/2: true | ||
897 | 169 | landscape-client/23: true | ||
898 | 170 | vault/2: | ||
899 | 171 | machine: '2' | ||
900 | 172 | subordinates: | ||
901 | 173 | hacluster-vault/0: true | ||
902 | 174 | landscape-client/22: true | ||
903 | 175 | """ | ||
904 | 176 | JUJU_FILE_DICT = yaml.safe_load(JUJU_FILE_MOCK)["applications"] | ||
905 | 177 | |||
906 | 178 | SVC2CHARM = { | ||
907 | 179 | "aodh-mysql-router": "mysql-router", | ||
908 | 180 | "easyrsa": "easyrsa", | ||
909 | 181 | "hacluster-neutron": "hacluster", | ||
910 | 182 | "hacluster-nova": "hacluster", | ||
911 | 183 | "hacluster-vault": "hacluster", | ||
912 | 184 | "juju-lint": "juju-lint", | ||
913 | 185 | "landscape-client": "landscape-client", | ||
914 | 186 | "memcached": "memcached", | ||
915 | 187 | "mysql-innodb-cluster": "mysql-innodb-cluster", | ||
916 | 188 | "neutron-api": "neutron-api", | ||
917 | 189 | "nova-cloud-controller": "nova-cloud-controller", | ||
918 | 190 | "nova-cloud-controller-mysql-router": "mysql-router", | ||
919 | 191 | "nova-compute-kvm": "nova-compute", | ||
920 | 192 | "rabbitmq-server": "rabbitmq-server", | ||
921 | 193 | "vault": "vault", | ||
922 | 194 | } | ||
923 | 195 | |||
924 | 196 | |||
class TestCommon:
    """Tests for the helper functions in ``cloud_upgrade_planner.common``.

    All tests run against ``JUJU_FILE_DICT``, the module-level fixture parsed
    from a mocked ``juju status --format=yaml`` export.
    """

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("memcached", "memcached/1"),
            ("mysql-innodb-cluster", "mysql-innodb-cluster/1"),
            ("neutron-api", "neutron-api/2"),
        ],
    )
    def test_find_leader_for_app(self, app, expected):
        output = c.find_leader_for_app(app, JUJU_FILE_DICT)
        assert output == expected

    def test_get_model_apps_missing_file(self):
        # A missing/unreadable status file makes the tool exit.
        with pytest.raises(SystemExit):
            c.get_model_apps("")

    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_get_model_apps_provided_file(self, juju_file):
        # A readable file yields the parsed "applications" mapping.
        output = c.get_model_apps("test")
        assert output == JUJU_FILE_DICT

    @pytest.mark.parametrize(
        "charms,expected",
        [
            (
                ["mysql-router", "nova-compute", "hacluster", "vault"],
                [
                    "aodh-mysql-router",
                    "nova-cloud-controller-mysql-router",
                    "nova-compute-kvm",
                    "hacluster-neutron",
                    "hacluster-nova",
                    "hacluster-vault",
                    "vault",
                ],
            ),
            # Unknown charms map to no applications.
            (["non-existing-charm"], []),
        ],
    )
    def test_find_charms_apps_in_model(self, charms, expected):
        output = c.find_charms_apps_in_model(charms, JUJU_FILE_DICT, SVC2CHARM)
        assert output == expected

    @pytest.mark.parametrize(
        "app,expected",
        [
            (
                "nova-compute-kvm",
                [
                    "nova-compute-kvm/1",
                    "nova-compute-kvm/10",
                    "nova-compute-kvm/11",
                    "nova-compute-kvm/12",
                    "nova-compute-kvm/13",
                    "nova-compute-kvm/7",
                    "nova-compute-kvm/9",
                ],
            ),
            (
                "mysql-innodb-cluster",
                [
                    "mysql-innodb-cluster/0",
                    "mysql-innodb-cluster/1",
                    "mysql-innodb-cluster/2",
                ],
            ),
        ],
    )
    def test_find_units_for_app(self, app, expected):
        output = c.find_units_for_app(app, JUJU_FILE_DICT)
        assert output == expected

    @pytest.mark.parametrize(
        "charm,expected",
        [
            ("neutron-api", ["neutron-api"]),
            ("nova-compute", ["nova-compute-kvm"]),
            (
                "mysql-router",
                ["aodh-mysql-router", "nova-cloud-controller-mysql-router"],
            ),
        ],
    )
    def test_find_apps_from_charm(self, charm, expected):
        output = c.find_apps_from_charm(charm, JUJU_FILE_DICT)
        assert output == expected

    def test_find_units_for_app_not_in_phase(self):
        # Subordinate-only apps have no "units" key in the fixture.
        with pytest.raises(KeyError):
            c.find_units_for_app("nova-cloud-controller-mysql-router", JUJU_FILE_DICT)

    def test_find_units_for_app_missing(self):
        # Completely unknown application names also raise KeyError.
        with pytest.raises(KeyError):
            c.find_units_for_app("appx", JUJU_FILE_DICT)

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-compute-kvm", "nova-compute-kvm/9", "18"),
            ("memcached", "memcached/1", "5/lxd/4"),
        ],
    )
    def test_find_machine_from_unit(self, app, unit, expected):
        output = c.find_machine_from_unit(app, unit, JUJU_FILE_DICT)
        assert output == expected

    def test_render_app_to_charm_dict(self):
        # The derived app -> charm map must match the hand-written SVC2CHARM.
        output = c.render_app_to_charm_dict(JUJU_FILE_DICT)
        assert output == SVC2CHARM

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("aodh-mysql-router", True),
            ("memcached", False),
        ],
    )
    def test_available_upgrade(self, app, expected):
        # True only when "can-upgrade-to" is present for the app.
        output = c.available_upgrade(app, JUJU_FILE_DICT)
        assert output == expected

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-cloud-controller", "nova-cloud-controller/1", "hacluster-nova/2"),
            ("neutron-api", "neutron-api/2", "hacluster-neutron/2"),
            ("nova-compute-kvm", "nova-compute-kvm/1", None),  # missing HA
            ("memcached", "memcached/1", None),  # missing subordinates
        ],
    )
    def test_find_hacluster_for_unit(self, app, unit, expected):
        output = c.find_hacluster_for_unit(app, unit, JUJU_FILE_DICT)
        assert output == expected
1059 | 331 | |||
1060 | 332 | |||
class TestOpenstackManagedUpgrade:
    """Tests for ``cloud_upgrade_planner.openstack_managed_upgrade``.

    These tests patch ``builtins.print`` and assert on the exact sequence of
    printed plan lines, so the expected literals must match the planner's
    output byte for byte.
    """

    @patch("builtins.print")
    @patch(
        "cloud_upgrade_planner.common.find_units_for_app",
        return_value=["neutron-api/0", "neutron-api/1", "neutron-api/2"],
    )
    @patch(
        "cloud_upgrade_planner.common.find_leader_for_app", return_value="neutron-api/2"
    )
    def test_plan_action_managed_phase_app(self, mock_leader, mock_units, mock_stdout):
        # Leader is upgraded first, then the remaining units in order.
        o.plan_action_managed_phase_app("test-app", JUJU_FILE_DICT, "test-release")
        calls = [
            call(
                " juju config test-app action-managed-upgrade=true openstack-origin=test-release"
            ),
            call(" {}".format(o.WATCH_COMMAND)),
            call(" juju run-action --wait neutron-api/2 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/0 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/1 openstack-upgrade"),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_local_path(self, mock_stdout):
        # local: charm paths warn and suggest --switch to the promulgated cs: charm.
        output = o.plan_charm_upgrade("juju-lint", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application juju-lint has local charm path local:focal/juju-lint-1\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in local charm are in upstream charm before\n running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm juju-lint --switch cs:juju-lint"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_cs_path(self, mock_stdout):
        # Promulgated cs: charms get a plain upgrade-charm, no warning.
        output = o.plan_charm_upgrade("neutron-api", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(" juju upgrade-charm neutron-api"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_tilde(self, mock_stdout):
        # cs:~owner/... branched charms warn and suggest the promulgated charm.
        output = o.plan_charm_upgrade("easyrsa", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application easyrsa from unrecognized path cs:~containers/easyrsa-345\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm easyrsa --switch cs:easyrsa"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_plan_charm_upgrade_deferred(self, defer_file, mock_stdout):
        # Charms supporting deferred restarts get a generated --config overlay
        # disabling auto-restarts, written via the mocked open().
        output = o.plan_charm_upgrade("rabbitmq-server", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application rabbitmq-server from unrecognized path cs:~llama-charmers-next/rabbitmq-server-5\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call("rabbitmq-server:\n enable-auto-restarts: False", file=defer_file()),
            call(
                " juju upgrade-charm rabbitmq-server --switch cs:rabbitmq-server --config deferred_restart_config_rabbitmq-server.yaml"
            ),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls
1139 | 411 | |||
1140 | 412 | |||
1141 | 413 | class TestUbuntuSeriesUpgrade: | ||
1142 | 414 | def test_main_missing_juju_file(self): | ||
1143 | 415 | with pytest.raises(SystemExit): | ||
1144 | 416 | u.main() | ||
1145 | 417 | |||
1146 | 418 | @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_sequential") | ||
1147 | 419 | @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_parallel") | ||
1148 | 420 | @patch( | ||
1149 | 421 | "cloud_upgrade_planner.common.find_apps_from_charm", return_value=["app"] | ||
1150 | 422 | ) | ||
1151 | 423 | @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK) | ||
1152 | 424 | def test_main_with_juju_file( | ||
1153 | 425 | self, juju_file, mock_apps_from_charm, mock_parallel, mock_sequential | ||
1154 | 426 | ): | ||
1155 | 427 | u.main() | ||
1156 | 428 | |||
1157 | 429 | charm_calls = [] | ||
1158 | 430 | for phase in range(2): | ||
1159 | 431 | for app in u.PHASE_APPS[phase]: | ||
1160 | 432 | charm_calls.append(call(app, JUJU_FILE_DICT)) | ||
1161 | 433 | mock_apps_from_charm.assert_has_calls(charm_calls) | ||
1162 | 434 | |||
1163 | 435 | parallel_apps_no = len(u.PHASE_APPS[1]) | ||
1164 | 436 | mock_parallel.assert_called_once_with( | ||
1165 | 437 | JUJU_FILE_DICT, ["app"] * parallel_apps_no, [] | ||
1166 | 438 | ) | ||
1167 | 439 | |||
1168 | 440 | sequential_apps_no = len(u.PHASE_APPS[0]) + len(u.PHASE_APPS[2]) | ||
1169 | 441 | seq_calls = [call(JUJU_FILE_DICT, "app", []) for _ in range(sequential_apps_no)] | ||
1170 | 442 | mock_sequential.assert_has_calls(seq_calls) | ||
1171 | 443 | |||
1172 | 444 | @patch("builtins.print") | ||
1173 | 445 | @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK) | ||
1174 | 446 | def test_main_output(self, juju_file, mock_stdout): | ||
1175 | 447 | u.main() | ||
1176 | 448 | calls = [ | ||
1177 | 449 | call(u.PRE_UPGRADE_STEPS), | ||
1178 | 450 | call("Phase 0\n========"), | ||
1179 | 451 | call(" App: rabbitmq-server"), | ||
1180 | 452 | call(" Units rabbitmq-server/5 rabbitmq-server/4 rabbitmq-server/6"), | ||
1181 | 453 | call( | ||
1182 | 454 | " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" | ||
1183 | 455 | ), | ||
1184 | 456 | call(" juju set-series rabbitmq-server bionic"), | ||
1185 | 457 | call(" juju run-action --wait rabbitmq-server/4 pause"), | ||
1186 | 458 | call(" juju run-action --wait rabbitmq-server/6 pause"), | ||
1187 | 459 | call(" juju upgrade-series 7/lxd/12 prepare bionic -y"), | ||
1188 | 460 | call(" juju scp local 7/lxd/12:"), | ||
1189 | 461 | call( | ||
1190 | 462 | " juju run --machine 7/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1191 | 463 | ), | ||
1192 | 464 | call( | ||
1193 | 465 | " juju run --machine 7/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1194 | 466 | ), | ||
1195 | 467 | call(" juju run --machine 7/lxd/12 --timeout=5m 'sudo init 6'"), | ||
1196 | 468 | call( | ||
1197 | 469 | " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/5'" | ||
1198 | 470 | ), | ||
1199 | 471 | call(" juju config rabbitmq-server source=distro"), | ||
1200 | 472 | call(" sleep 120; jw"), | ||
1201 | 473 | call(" juju upgrade-series 7/lxd/12 complete"), | ||
1202 | 474 | call(" juju upgrade-series 6/lxd/12 prepare bionic -y"), | ||
1203 | 475 | call(" juju scp local 6/lxd/12:"), | ||
1204 | 476 | call( | ||
1205 | 477 | " juju run --machine 6/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1206 | 478 | ), | ||
1207 | 479 | call( | ||
1208 | 480 | " juju run --machine 6/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1209 | 481 | ), | ||
1210 | 482 | call(" juju run --machine 6/lxd/12 --timeout=5m 'sudo init 6'"), | ||
1211 | 483 | call( | ||
1212 | 484 | " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/4'" | ||
1213 | 485 | ), | ||
1214 | 486 | call(" juju upgrade-series 6/lxd/12 complete"), | ||
1215 | 487 | call(" juju upgrade-series 8/lxd/11 prepare bionic -y"), | ||
1216 | 488 | call(" juju scp local 8/lxd/11:"), | ||
1217 | 489 | call( | ||
1218 | 490 | " juju run --machine 8/lxd/11 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1219 | 491 | ), | ||
1220 | 492 | call( | ||
1221 | 493 | " juju run --machine 8/lxd/11 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1222 | 494 | ), | ||
1223 | 495 | call(" juju run --machine 8/lxd/11 --timeout=5m 'sudo init 6'"), | ||
1224 | 496 | call( | ||
1225 | 497 | " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/6'" | ||
1226 | 498 | ), | ||
1227 | 499 | call(" juju upgrade-series 8/lxd/11 complete"), | ||
1228 | 500 | call( | ||
1229 | 501 | " juju run-action --wait rabbitmq-server/5 complete-cluster-series-upgrade" | ||
1230 | 502 | ), | ||
1231 | 503 | call("\n"), | ||
1232 | 504 | call(" App: vault"), | ||
1233 | 505 | call(" Units vault/1 vault/0 vault/2"), | ||
1234 | 506 | call( | ||
1235 | 507 | " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" | ||
1236 | 508 | ), | ||
1237 | 509 | call(" juju set-series vault bionic"), | ||
1238 | 510 | call(" juju run-action --wait hacluster-vault/1 pause"), | ||
1239 | 511 | call(" juju run-action --wait hacluster-vault/0 pause"), | ||
1240 | 512 | call(" juju run-action --wait vault/0 pause"), | ||
1241 | 513 | call(" juju run-action --wait vault/2 pause"), | ||
1242 | 514 | call(" juju upgrade-series 1 prepare bionic -y"), | ||
1243 | 515 | call(" juju scp local 1:"), | ||
1244 | 516 | call( | ||
1245 | 517 | " juju run --machine 1 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1246 | 518 | ), | ||
1247 | 519 | call( | ||
1248 | 520 | " juju run --machine 1 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1249 | 521 | ), | ||
1250 | 522 | call(" juju run --machine 1 --timeout=5m 'sudo init 6'"), | ||
1251 | 523 | call(" sleep 120; jw; echo 'Check status of workload for vault/1'"), | ||
1252 | 524 | call(" sleep 120; jw"), | ||
1253 | 525 | call(" juju upgrade-series 1 complete"), | ||
1254 | 526 | call(" juju upgrade-series 0 prepare bionic -y"), | ||
1255 | 527 | call(" juju scp local 0:"), | ||
1256 | 528 | call( | ||
1257 | 529 | " juju run --machine 0 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1258 | 530 | ), | ||
1259 | 531 | call( | ||
1260 | 532 | " juju run --machine 0 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1261 | 533 | ), | ||
1262 | 534 | call(" juju run --machine 0 --timeout=5m 'sudo init 6'"), | ||
1263 | 535 | call(" sleep 120; jw; echo 'Check status of workload for vault/0'"), | ||
1264 | 536 | call(" juju upgrade-series 0 complete"), | ||
1265 | 537 | call(" juju upgrade-series 2 prepare bionic -y"), | ||
1266 | 538 | call(" juju scp local 2:"), | ||
1267 | 539 | call( | ||
1268 | 540 | " juju run --machine 2 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1269 | 541 | ), | ||
1270 | 542 | call( | ||
1271 | 543 | " juju run --machine 2 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1272 | 544 | ), | ||
1273 | 545 | call(" juju run --machine 2 --timeout=5m 'sudo init 6'"), | ||
1274 | 546 | call(" sleep 120; jw; echo 'Check status of workload for vault/2'"), | ||
1275 | 547 | call(" juju upgrade-series 2 complete"), | ||
1276 | 548 | call( | ||
1277 | 549 | " ## Manual step: You will need to unseal the vault after each unit reboots" | ||
1278 | 550 | ), | ||
1279 | 551 | call("\n"), | ||
1280 | 552 | call("Phase 1\n========"), | ||
1281 | 553 | call(" juju set-series neutron-api bionic"), | ||
1282 | 554 | call(" juju set-series nova-cloud-controller bionic"), | ||
1283 | 555 | call(" juju set-series memcached bionic"), | ||
1284 | 556 | call(" juju set-series easyrsa bionic"), | ||
1285 | 557 | call(" # Pause non-leaders"), | ||
1286 | 558 | call( | ||
1287 | 559 | " juju run-action --wait hacluster-neutron/0 pause\n juju run-action --wait hacluster-neutron/1 pause\n juju run-action --wait hacluster-nova/0 pause\n juju run-action --wait hacluster-nova/2 pause" | ||
1288 | 560 | ), | ||
1289 | 561 | call( | ||
1290 | 562 | " juju run-action --wait neutron-api/0 pause\n juju run-action --wait neutron-api/1 pause\n juju run-action --wait nova-cloud-controller/0 pause\n juju run-action --wait nova-cloud-controller/1 pause" | ||
1291 | 563 | ), | ||
1292 | 564 | call( | ||
1293 | 565 | " # Prepare leaders - These can be done in parallel before non-leaders" | ||
1294 | 566 | ), | ||
1295 | 567 | call( | ||
1296 | 568 | " juju upgrade-series 8/lxd/5 prepare bionic -y # neutron-api/2\n juju upgrade-series 8/lxd/6 prepare bionic -y # nova-cloud-controller/2\n juju upgrade-series 5/lxd/4 prepare bionic -y # memcached/1\n juju upgrade-series 7/lxd/3 prepare bionic -y # easyrsa/0" | ||
1297 | 569 | ), | ||
1298 | 570 | call( | ||
1299 | 571 | " # Prepare non-leaders - These can be done in parallel after leaders" | ||
1300 | 572 | ), | ||
1301 | 573 | call( | ||
1302 | 574 | " juju upgrade-series 6/lxd/5 prepare bionic -y # neutron-api/0\n juju upgrade-series 7/lxd/6 prepare bionic -y # neutron-api/1\n juju upgrade-series 6/lxd/6 prepare bionic -y # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 prepare bionic -y # nova-cloud-controller/1" | ||
1303 | 575 | ), | ||
1304 | 576 | call( | ||
1305 | 577 | " for i in 6/lxd/5 7/lxd/6 8/lxd/5 6/lxd/6 7/lxd/7 8/lxd/6 5/lxd/4 7/lxd/3; do juju scp local $i: ; done" | ||
1306 | 578 | ), | ||
1307 | 579 | call(" # Update all machines to latest patches"), | ||
1308 | 580 | call( | ||
1309 | 581 | " ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step" | ||
1310 | 582 | ), | ||
1311 | 583 | call( | ||
1312 | 584 | " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1313 | 585 | ), | ||
1314 | 586 | call(" # Perform do-release-upgrade"), | ||
1315 | 587 | call( | ||
1316 | 588 | " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1317 | 589 | ), | ||
1318 | 590 | call( | ||
1319 | 591 | " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" | ||
1320 | 592 | ), | ||
1321 | 593 | call( | ||
1322 | 594 | " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" | ||
1323 | 595 | ), | ||
1324 | 596 | call( | ||
1325 | 597 | " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot" | ||
1326 | 598 | ), | ||
1327 | 599 | call(" # Reboot all API machines into bionic"), | ||
1328 | 600 | call( | ||
1329 | 601 | " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=10m 'sudo init 6'" | ||
1330 | 602 | ), | ||
1331 | 603 | call(" # Post Upgrade tasks"), | ||
1332 | 604 | call( | ||
1333 | 605 | " juju config neutron-api openstack-origin=distro\n juju config nova-cloud-controller openstack-origin=distro\n juju config memcached openstack-origin=distro\n juju config easyrsa openstack-origin=distro" | ||
1334 | 606 | ), | ||
1335 | 607 | call( | ||
1336 | 608 | " # Complete leaders - These can all be done in parallel before non-leaders" | ||
1337 | 609 | ), | ||
1338 | 610 | call( | ||
1339 | 611 | " echo 'Check status of workload for neutron-api/2'\n juju upgrade-series 8/lxd/5 complete # neutron-api/2\n echo 'Check status of workload for nova-cloud-controller/2'\n juju upgrade-series 8/lxd/6 complete # nova-cloud-controller/2\n echo 'Check status of workload for memcached/1'\n juju upgrade-series 5/lxd/4 complete # memcached/1\n echo 'Check status of workload for easyrsa/0'\n juju upgrade-series 7/lxd/3 complete # easyrsa/0" | ||
1340 | 612 | ), | ||
1341 | 613 | call( | ||
1342 | 614 | " # Complete non-leaders - These can all be done in parallel after leaders complete" | ||
1343 | 615 | ), | ||
1344 | 616 | call( | ||
1345 | 617 | " juju upgrade-series 6/lxd/5 complete # neutron-api/0\n juju upgrade-series 7/lxd/6 complete # neutron-api/1\n juju upgrade-series 6/lxd/6 complete # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 complete # nova-cloud-controller/1" | ||
1346 | 618 | ), | ||
1347 | 619 | call(" # Post App Upgrade cleanup tasks"), | ||
1348 | 620 | call(""), | ||
1349 | 621 | call("\n"), | ||
1350 | 622 | call("Phase 2\n========"), | ||
1351 | 623 | call(" App: nova-compute-kvm"), | ||
1352 | 624 | call( | ||
1353 | 625 | " Units nova-compute-kvm/7 nova-compute-kvm/1 nova-compute-kvm/10 nova-compute-kvm/11 nova-compute-kvm/12 nova-compute-kvm/13 nova-compute-kvm/9" | ||
1354 | 626 | ), | ||
1355 | 627 | call( | ||
1356 | 628 | " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"" | ||
1357 | 629 | ), | ||
1358 | 630 | call(" juju set-series nova-compute-kvm bionic"), | ||
1359 | 631 | call(" juju upgrade-series 16 prepare bionic -y"), | ||
1360 | 632 | call(" juju scp local 16:"), | ||
1361 | 633 | call( | ||
1362 | 634 | " juju run --machine 16 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1363 | 635 | ), | ||
1364 | 636 | call( | ||
1365 | 637 | " juju run --machine 16 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1366 | 638 | ), | ||
1367 | 639 | call(" juju run --machine 16 --timeout=5m 'sudo init 6'"), | ||
1368 | 640 | call( | ||
1369 | 641 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/7'" | ||
1370 | 642 | ), | ||
1371 | 643 | call(" juju config nova-compute-kvm openstack-origin=distro"), | ||
1372 | 644 | call(" sleep 120; jw"), | ||
1373 | 645 | call(" juju upgrade-series 16 complete"), | ||
1374 | 646 | call(" juju upgrade-series 10 prepare bionic -y"), | ||
1375 | 647 | call(" juju scp local 10:"), | ||
1376 | 648 | call( | ||
1377 | 649 | " juju run --machine 10 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1378 | 650 | ), | ||
1379 | 651 | call( | ||
1380 | 652 | " juju run --machine 10 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1381 | 653 | ), | ||
1382 | 654 | call(" juju run --machine 10 --timeout=5m 'sudo init 6'"), | ||
1383 | 655 | call( | ||
1384 | 656 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/1'" | ||
1385 | 657 | ), | ||
1386 | 658 | call(" juju upgrade-series 10 complete"), | ||
1387 | 659 | call(" juju upgrade-series 19 prepare bionic -y"), | ||
1388 | 660 | call(" juju scp local 19:"), | ||
1389 | 661 | call( | ||
1390 | 662 | " juju run --machine 19 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1391 | 663 | ), | ||
1392 | 664 | call( | ||
1393 | 665 | " juju run --machine 19 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1394 | 666 | ), | ||
1395 | 667 | call(" juju run --machine 19 --timeout=5m 'sudo init 6'"), | ||
1396 | 668 | call( | ||
1397 | 669 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/10'" | ||
1398 | 670 | ), | ||
1399 | 671 | call(" juju upgrade-series 19 complete"), | ||
1400 | 672 | call(" juju upgrade-series 20 prepare bionic -y"), | ||
1401 | 673 | call(" juju scp local 20:"), | ||
1402 | 674 | call( | ||
1403 | 675 | " juju run --machine 20 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1404 | 676 | ), | ||
1405 | 677 | call( | ||
1406 | 678 | " juju run --machine 20 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1407 | 679 | ), | ||
1408 | 680 | call(" juju run --machine 20 --timeout=5m 'sudo init 6'"), | ||
1409 | 681 | call( | ||
1410 | 682 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/11'" | ||
1411 | 683 | ), | ||
1412 | 684 | call(" juju upgrade-series 20 complete"), | ||
1413 | 685 | call(" juju upgrade-series 21 prepare bionic -y"), | ||
1414 | 686 | call(" juju scp local 21:"), | ||
1415 | 687 | call( | ||
1416 | 688 | " juju run --machine 21 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1417 | 689 | ), | ||
1418 | 690 | call( | ||
1419 | 691 | " juju run --machine 21 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1420 | 692 | ), | ||
1421 | 693 | call(" juju run --machine 21 --timeout=5m 'sudo init 6'"), | ||
1422 | 694 | call( | ||
1423 | 695 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/12'" | ||
1424 | 696 | ), | ||
1425 | 697 | call(" juju upgrade-series 21 complete"), | ||
1426 | 698 | call(" juju upgrade-series 22 prepare bionic -y"), | ||
1427 | 699 | call(" juju scp local 22:"), | ||
1428 | 700 | call( | ||
1429 | 701 | " juju run --machine 22 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1430 | 702 | ), | ||
1431 | 703 | call( | ||
1432 | 704 | " juju run --machine 22 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1433 | 705 | ), | ||
1434 | 706 | call(" juju run --machine 22 --timeout=5m 'sudo init 6'"), | ||
1435 | 707 | call( | ||
1436 | 708 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/13'" | ||
1437 | 709 | ), | ||
1438 | 710 | call(" juju upgrade-series 22 complete"), | ||
1439 | 711 | call(" juju upgrade-series 18 prepare bionic -y"), | ||
1440 | 712 | call(" juju scp local 18:"), | ||
1441 | 713 | call( | ||
1442 | 714 | " juju run --machine 18 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'" | ||
1443 | 715 | ), | ||
1444 | 716 | call( | ||
1445 | 717 | " juju run --machine 18 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'" | ||
1446 | 718 | ), | ||
1447 | 719 | call(" juju run --machine 18 --timeout=5m 'sudo init 6'"), | ||
1448 | 720 | call( | ||
1449 | 721 | " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/9'" | ||
1450 | 722 | ), | ||
1451 | 723 | call(" juju upgrade-series 18 complete"), | ||
1452 | 724 | call("\n"), | ||
1453 | 725 | call(u.POST_UPGRADE_STEPS), | ||
1454 | 726 | ] | ||
1455 | 727 | |||
1456 | 728 | assert mock_stdout.mock_calls == calls |
This merge proposal is being monitored by mergebot. Change the status to Approved to merge.