Merge ~smigiel-dariusz/cloud-upgrade-planner:pre_integration into cloud-upgrade-planner:master

Proposed by Dariusz Smigiel
Status: Merged
Approved by: James Troup
Approved revision: 3a2068a958d16e398e45fcf0a1cde80e16d137b5
Merged at revision: bf1c391b152991cfca9aa6311e471e48302c7f1d
Proposed branch: ~smigiel-dariusz/cloud-upgrade-planner:pre_integration
Merge into: cloud-upgrade-planner:master
Diff against target: 1456 lines (+1412/-0)
5 files modified
cloud_upgrade_planner/common.py (+76/-0)
cloud_upgrade_planner/openstack_managed_upgrade.py (+278/-0)
cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py (+328/-0)
setup.py (+2/-0)
tests/test_ubuntu_series_upgrade_for_openstack.py (+728/-0)
Reviewer Review Type Date Requested Status
BootStack Reviewers Pending
BootStack Reviewers Pending
Review via email: mp+409606@code.launchpad.net

Commit message

Migration of bootstack-tools to cloud-upgrade-planner

To post a comment you must log in.
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote :

This merge proposal is being monitored by mergebot. Change the status to Approved to merge.

Revision history for this message
James Troup (elmo) wrote :

I'll merge this, but there are some things that need to be fixed in follow-ups, please:

 1) assumption of /home/ubuntu
 2) reference to Canonical Wiki

Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote :

Change successfully merged at revision bf1c391b152991cfca9aa6311e471e48302c7f1d

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/cloud_upgrade_planner/cli.py b/cloud_upgrade_planner/cli.py
0old mode 1007550old mode 100755
1new mode 1006441new mode 100644
diff --git a/cloud_upgrade_planner/common.py b/cloud_upgrade_planner/common.py
2new file mode 1006442new file mode 100644
index 0000000..a31016e
--- /dev/null
+++ b/cloud_upgrade_planner/common.py
@@ -0,0 +1,76 @@
1import re
2import sys
3import yaml
4
5
def find_units_for_app(app, model_apps):
    """Return the unit names belonging to *app* in the model."""
    return [unit for unit in model_apps[app]['units']]
8
9
def find_leader_for_app(app, model_apps):
    """Return the first unit of *app* that carries a 'leader' flag.

    Returns None when no unit is marked as leader.
    """
    for name, unit_data in model_apps[app]['units'].items():
        if 'leader' in unit_data:
            return name
    return None
14
15
def find_charms_apps_in_model(charms, model_apps, svc2charm):
    """Return all application names whose charm appears in *charms*.

    Results are grouped per charm, in the order the charms are given.
    """
    return [app
            for charm in charms
            for app in model_apps
            if svc2charm[app] == charm]
22
23
def get_model_apps(status_file):
    """Load a 'juju status --format yaml' dump and return its applications map.

    Prints a hint and exits with status 1 when *status_file* cannot be read.
    A dump without an 'applications' key raises KeyError, as before.
    """
    try:
        with open(status_file) as status_fd:
            parsed = yaml.safe_load(status_fd)
    except IOError:
        print("Please run a fresh copy of juju status --format yaml > jsfy")
        sys.exit(1)
    return parsed["applications"]
31
32
def find_apps_from_charm(charm, model_apps):
    """Return applications whose charm URL matches *charm*.

    Accepts local: or cs: prefixes with an optional ~user/ or series/
    component, followed by the charm name and a numeric revision.
    """
    # Raw string fixes the invalid "\d" escape of the original (a
    # SyntaxWarning on modern Python); re.escape guards charm names that
    # contain regex metacharacters.
    regex = re.compile(r"(local:|cs:)(~*.*/)*{}-\d+".format(re.escape(charm)))
    return [app for app in model_apps.keys()
            if re.match(regex, model_apps[app]['charm']) is not None]
38
39
def find_machine_from_unit(app, unit, model_apps):
    """Return the machine id that hosts *unit* of *app*."""
    unit_data = model_apps[app]['units'][unit]
    return unit_data['machine']
42
43
def render_app_to_charm_dict(model_apps):
    """Map each application name to its bare charm name.

    Charm URLs look like local:|cs: plus an optional ~namespace/ or
    series/ component, then the charm name and a revision suffix.
    Regex groups:
      1: store prefix (local:|cs:)
      2: ~namespace/ or series/ (may be empty)
      3: bare charm name
    """
    pattern = re.compile(r"(local:|cs:)(~*.*/|)(.*)-\d+")
    return {app: pattern.match(model_apps[app]['charm']).group(3)
            for app in model_apps}
56
57
def available_upgrade(app, model_apps):
    """Return True when juju reports a newer charm revision for *app*.

    Otherwise print a playbook note naming the currently running charm
    and return False.
    """
    app_data = model_apps[app]
    if 'can-upgrade-to' not in app_data:
        print(" # No charm upgrade available for {}. "
              "Running: {}".format(app, app_data["charm"]))
        return False
    return True
64
65
def find_hacluster_for_unit(app, unit, model_apps):
    """Return the hacluster subordinate unit attached to *unit*, or None.

    Scans the unit's subordinates and returns the first one whose
    application runs the hacluster charm.
    """
    unit_data = model_apps[app]['units'][unit]
    if 'subordinates' not in unit_data:
        return None
    # Raw string fixes the invalid "\d" escape of the original pattern
    # (a SyntaxWarning on modern Python); the charm name is a literal, so
    # the original .format('hacluster') indirection is dropped.
    regex = re.compile(r"(local:|cs:)(~*.*/)*hacluster-\d+")
    for subunit in unit_data['subordinates'].keys():
        subunit_app = subunit.split("/")[0]
        if re.match(regex, model_apps[subunit_app]['charm']) is not None:
            return subunit
    return None
76
diff --git a/cloud_upgrade_planner/openstack_managed_upgrade.py b/cloud_upgrade_planner/openstack_managed_upgrade.py
0new file mode 10064477new file mode 100644
index 0000000..4b4960e
--- /dev/null
+++ b/cloud_upgrade_planner/openstack_managed_upgrade.py
@@ -0,0 +1,278 @@
1#!/usr/bin/python3
2
# requires file in current directory named "jsfy" created with
4# juju status --format yaml > jsfy
5# (version of juju must support leadership in yaml output)
6
7# Assumptions
8# we assume you're deploying either cs:charmname or cs:~namespace/charmname
9# or local:<series>/<charm> versions of charms, and does not support
10# differently-named charms.
11
12# set this to the destination openstack-origin of your choice
13# TODO:
14# include optional unit pauses for pure clean HA
15# include evacuating and monitoring each nova compute node
16# include neutron-gateway HA router migrations if pausing
# include gnocchi/aodh/octavia/placement installs
18
19import argparse
20import re
21import sys
22
23from cloud_upgrade_planner import common
24
25# The phases of the update. After any of these phases, updates can be paused
26# Each phase is recommended to take a day, with phases 3 and 4 being combined
27PHASE_APPS = {0: ["openstack-service-checks",
28 "percona-cluster",
29 "rabbitmq-server"],
30 1: ["keystone",
31 "ceph-mon"],
32 2: ["ceph-fs",
33 "ceph-radosgw",
34 "swift-proxy",
35 "swift-storage",
36 "cinder",
37 "cinder-ceph",
38 "cinder-backup",
39 "barbican",
40 "glance",
41 "aodh",
42 "gnocchi",
43 "ceilometer",
44 "designate",
45 "designate-bind",
46 "neutron-api",
47 "nova-cloud-controller",
48 "heat",
49 "manila",
50 "manila-generic",
51 "openstack-dashboard",
52 "placement",
53 "octavia",
54 "easyrsa",
55 "ovn-central"],
56 3: ["neutron-gateway",
57 "nova-compute",
58 "neutron-openvswitch",
59 "ovn-chassis",
60 "ceph-osd"]}
61
62ACTION_MANAGED_PHASES = [1, 3]
63DEFERRED_WAIT_PHASES = [2]
64
65# These charms use source: instead of openstack-origin: config flags
66ORIGIN_VAR_IS_SOURCE = ["ceph-osd",
67 "ceph-mon",
68 "ceph-radosgw",
69 "percona-cluster",
70 "rabbitmq-server"]
71
72# These charms do not have openstack-origin/source config flags
73CHARM_UPGRADES_ONLY = ["neutron-openvswitch",
74 "ovn-chassis",
75 "memcached",
76 "mongodb",
77 "graylog",
78 "prometheus",
79 "openstack-service-checks",
80 "cinder-ceph",
81 "cinder-backup",
82 "easyrsa",
83 "designate-bind"]
84
85# Enable deferred restarts on these charms as they are upgraded
86DEFERRED_RESTART_CHARMS = [
87 "neutron-gateway",
88 "neutron-openvswitch",
89 "ovn-central",
90 "ovn-chassis",
91 "ovn-dedicated-chassis",
92 "rabbitmq-server",
93]
94
95# TODO: When upgrading to these versions, add step to install these charms
96# between Phase 1 and Phase 2
97NEW_CHARMS_FOR_VERSIONS = {"train": ["placement"],
98 "queens": ["octavia"],
99 "ocata": ["aodh", "gnocchi"]}
100
101# This can be used both for required --switch changes for 19.04+ or can be
102# populated with specific version URLs
103# e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be
104# switched to cs:~openstack-charmers/cinder-backup
105# Anything not called out with these paths will be switched to cs:<charmname>
106# if currently running cs:~*/<charmname> or local:<charmname>
107CHARMSTORE_TILDE_PATHS = {
108 "cinder-backup": "cs:~openstack-charmers/cinder-backup",
109 "easyrsa": "cs:~containers/easyrsa",
110 "policy-routing": "cs:~canonical-bootstack/policy-routing",
111 "infra-node": "cs:~canonical-bootstack/infra-node",
112}
113
114JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing"
115WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES)
116
117PRE_UPGRADE_STEPS = """
118This playbook assumes all applications are running an identical openstack-origin/source of the prior version openstack.
119You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify the first and last apps are at the same level.
120
121### IMPORTANT ###
122Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders
123
124Please read the wiki for openstack upgrades for known issues and read the upstream charm guide release notes and issues.
125https://wiki.canonical.com/CDO/IS/Bootstack/Playbooks/OpenstackReleaseUpgradeActionManaged
126https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html
127
128There may be several things that need to be updated before changing to a new version of openstack, such as
129setting keystone tokens to fernet before upgrade to Rocky,
130ensuring that the ceph minimum_osd_version is set for the current version of ceph before upgrade,
131before upgrading to train, ensure that you've installed octavia and have migrated loadbalancers, as lbaasv2 is deprecated.
132
133See the referenced docs for these steps.
134
135After any upgrades, please check status of services via nagios/thruk to ensure you're not exhibiting new alerts.
136
137This upgrade guide does not cover router or VM migrations to avoid data-plane outages during package upgrades.
138Please be sure to plan accordingly based on customer request.
139
140All charms should be at the latest version before upgrading openstack.
141"""
142
143POST_UPGRADE_STEPS = """
144
145# POST UPGRADE ACTIONS
146Perform these steps after the cloud has completed upgrades
147
148Validate Nagios is clean and any disabled nova-compute services have been re-enabled.
149
150Update ceph minimum_osd_version per docs:
151https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/upgrade-issues.html#ceph-option-require-osd-release
152"""
153
154
def plan_charm_upgrade(app, model_apps, svc2charm):
    """Print the juju commands that upgrade the charm of *app*.

    Emits a --switch argument when the app runs a local, branched or
    relocated charm, and writes a per-app yaml config file that disables
    auto-restarts for charms listed in DEFERRED_RESTART_CHARMS.

    Side effects: prints the playbook commands to stdout and may create
    a deferred_restart_config_<app>.yaml file in the current directory.
    """
    switch_args, config_filename = "", ""
    charm = svc2charm[app]
    current_charm = model_apps[app]['charm']
    # strip version number (eg: cs:~foo/my-charm-123 becomes cs:~foo/my-charm)
    current_charm_path = current_charm.rsplit("-", 1)[0]
    # None when the charm has no known relocated/tilde charmstore path
    target_tilde = CHARMSTORE_TILDE_PATHS.get(charm, None)

    if "local:" in current_charm_path and not target_tilde:
        # Provide warning to user that local charm was found and needs sanity check
        print("""
 WARNING!!! Application {app} has local charm path {current_charm}
 Suggesting switch to latest promulgated cs: version.
 Please ensure updates in local charm are in upstream charm before
 running upgrade-charm on this application.
 """.format(app=app, current_charm=current_charm))
        switch_args += " --switch cs:{}".format(charm)
    elif target_tilde and current_charm_path != target_tilde:
        # Known relocation (e.g. cinder-backup moved under ~openstack-charmers)
        switch_args += " --switch {}".format(target_tilde)
    elif "cs:~" in current_charm_path or "/" in current_charm_path:
        # Branched/namespaced charm with no known target path: suggest the
        # promulgated cs:<charm> and warn the operator to verify first.
        print("""
 WARNING!!! Application {app} from unrecognized path {current_charm}
 Suggesting switch to latest promulgated cs: version.
 Please ensure updates in the above branched charm are in the latest
 promulgated charm before running upgrade-charm on this application.
 """.format(app=app, current_charm=current_charm))
        switch_args += " --switch {}".format("cs:{}".format(charm))

    if charm in DEFERRED_RESTART_CHARMS:
        config_filename = "deferred_restart_config_{}.yaml".format(app)
        # Format of config file per 'juju deploy --help' doc
        # application-name:
        #  option1: value1
        #  option2: value2
        with open(config_filename, "w") as conf:
            print("{}:\n enable-auto-restarts: False".format(app), file=conf)

    if switch_args or common.available_upgrade(app, model_apps):
        # upgrade-charm takes the deferred-restart settings via --config
        config_args = ' --config {}'.format(config_filename) if config_filename else ""
        print(" juju upgrade-charm {}{}{}".format(app, switch_args, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
    elif config_filename:
        # No charm upgrade available: apply the deferred-restart settings
        # with 'juju config --file' instead.
        config_args = ' --file {}'.format(config_filename) if config_filename else ""
        print(" juju config {}{}".format(app, config_args))
        print(" {}\n\n".format(WATCH_COMMAND))
200
201
def plan_action_managed_phase_app(app, model_apps, release):
    """Print action-managed, per-unit upgrade commands for *app*.

    Sets action-managed-upgrade and the target openstack-origin, then
    runs the openstack-upgrade action on the leader unit first, followed
    by the remaining units.

    Fix over the original: the unused local ``unit_list`` is removed.
    """
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    units.remove(leader)
    # Leader must be upgraded before the non-leader units.
    upgradeorder = [leader]
    upgradeorder.extend(units)
    print(" juju config {} "
          "action-managed-upgrade=true "
          "openstack-origin={}".format(app, release))
    print(" {}".format(WATCH_COMMAND))
    for unit in upgradeorder:
        print(" juju run-action --wait {} "
              "openstack-upgrade".format(unit))
216
217
def parse_args(argv):
    """Process CLI arguments.

    Args:
        argv: list of argument strings (excluding the program name).

    Returns:
        argparse.Namespace with status_file, charms_only and release.
    """
    parser = argparse.ArgumentParser(
        prog="openstack_managed_upgrade.py",
        description=(
            # Trailing spaces keep the words separated when adjacent string
            # literals are concatenated; the original ran them together
            # ("yamland", "theversion") in the --help output.
            "this program uses the output from juju status --format yaml "
            "and creates a playbook of juju upgrade actions to upgrade the "
            "version of Openstack to the target version"
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-f", "--status-file", dest="status_file", type=str,
                        default="jsfy",
                        help="file containing 'juju status --format yaml' output")
    parser.add_argument("-c", "--charms-only", dest="charms_only",
                        action="store_true",
                        help="Only plan charm upgrades")
    parser.add_argument("release", type=str,
                        help="Set release to UCA target for upgrade such as: 'cloud:bionic-ussuri'")
    return parser.parse_args(argv)
238
239
def main(argv=None):
    """Entry point: print the full phased OpenStack upgrade playbook.

    Reads a 'juju status --format yaml' dump, maps apps to charm names,
    then for each phase prints charm-upgrade commands and (unless
    --charms-only) the config changes or action-managed steps that move
    each app to the target UCA release.
    """
    args = parse_args(argv)
    model_apps = common.get_model_apps(args.status_file)
    svc2charm = common.render_app_to_charm_dict(model_apps)
    print(PRE_UPGRADE_STEPS)
    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        this_phase_apps = common.find_charms_apps_in_model(PHASE_APPS[phase],
                                                          model_apps, svc2charm)
        for app in this_phase_apps:
            # skip landscape upgrades
            if "landscape" in app:
                continue
            print(" App: {}".format(app))
            print(" Charm: {}".format(svc2charm[app]))
            print(" Commands:")
            plan_charm_upgrade(app, model_apps, svc2charm)
            if args.charms_only:
                continue
            if svc2charm[app] in CHARM_UPGRADES_ONLY:
                # no openstack repo upgrade for these charms
                continue
            if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
                # not possible to be action-managed-upgrade
                print(" juju config {} source={}".format(app, args.release))
            elif phase in ACTION_MANAGED_PHASES:
                plan_action_managed_phase_app(app, model_apps, args.release)
            else:
                print(" juju config {} openstack-origin={}"
                      " action-managed-upgrade=false".format(app, args.release))
            if phase not in DEFERRED_WAIT_PHASES:
                print(" {}\n".format(WATCH_COMMAND))
        # deferred-wait phases get a single combined watch at phase end
        if not args.charms_only and phase in DEFERRED_WAIT_PHASES:
            print("\n {}\n".format(WATCH_COMMAND))

    print(POST_UPGRADE_STEPS)


if __name__ == "__main__":
    main(sys.argv[1:])
diff --git a/cloud_upgrade_planner/plan.py b/cloud_upgrade_planner/plan.py
0old mode 100755279old mode 100755
1new mode 100644280new mode 100644
diff --git a/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py
2new file mode 100644281new file mode 100644
index 0000000..8db72c1
--- /dev/null
+++ b/cloud_upgrade_planner/ubuntu_series_upgrade_for_openstack.py
@@ -0,0 +1,328 @@
1#!/usr/bin/python3
2
# requires file in current directory named "jsfy" created with
4# juju status --format yaml > jsfy
5# (version of juju must support leadership in yaml output)
6
7# Assumptions
8# we assume you're deploying either cs:charmname or cs:~namespace/charmname
9# or local:<series>/<charm> versions of charms, and does not support
10# differently-named charms.
11
12# set this to the destination openstack-origin of your choice
13# TODO: add in parseargs
14# include stepped version callouts
15# include evacuating and monitoring each nova compute node
16# include neutron-gateway HA router migrations if pausing
17
18import re
19import sys
20import yaml
21
22from cloud_upgrade_planner import common
23
24# Set this to your target platform ### See TODOs
25UCA = "distro"
26series = "bionic"
27svc2charm = {}
28
29# The phases of the update. After any of these phases, updates can be paused
30# Each phase is recommended to take a day, with phases 3 and 4 being combined
31PHASE_APPS = {0: ["percona-cluster",
32 "rabbitmq-server",
33 "ceph-mon",
34 "ovn-central",
35 "etcd",
36 "vault"],
37 1: ["keystone",
38 "ceph-fs",
39 "ceph-radosgw",
40 "swift-proxy",
41 "swift-storage",
42 "cinder",
43 "barbican",
44 "glance",
45 "aodh",
46 "gnocchi",
47 "ceilometer",
48 "designate",
49 "designate-bind",
50 "neutron-api",
51 "nova-cloud-controller",
52 "heat",
53 "manila",
54 "manila-generic",
55 "openstack-dashboard",
56 "placement",
57 "octavia",
58 "memcached",
59 "easyrsa"],
60 2: ["nova-compute",
61 "neutron-gateway",
62 "ceph-osd"],
63}
64
65# These charms use source: instead of openstack-origin: config flags
66ORIGIN_VAR_IS_SOURCE = ["ceph-osd",
67 "ceph-mon",
68 "ceph-fs",
69 "ceph-radosgw",
70 "percona-cluster",
71 "rabbitmq-server"]
72
73# These charms only need the OS upgraded, no UCA tracking
74NO_ORIGIN = ["designate-bind", "vault"]
75
76# These charms do not have openstack-origin/source config flags
77CHARM_UPGRADES_ONLY = ["neutron-openvswitch",
78 "memcached",
79 "mongodb",
80 "graylog",
81 "prometheus",
82 "openstack-service-checks",
83 "cinder-ceph",
84 "cinder-backup"]
85
86NO_PAUSE_APPS = ["ceph-mon",
87 "ceph-osd",
88 "neutron-gateway",
89 "nova-compute"]
90
91POST_APP_UPGRADE_ACTIONS_FOR_LEADERS = {
92 "percona-cluster": "complete-cluster-series-upgrade",
93 "rabbitmq-server": "complete-cluster-series-upgrade",
94 "ceilometer": "ceilometer-upgrade",
95}
96
97POST_APP_UPGRADE_MANUAL_STEPS = {
98 "vault": "You will need to unseal the vault after each unit reboots",
99}
100# This can be used both for required --switch changes for 19.04+ or can be
101# populated with specific version URLs
102# e.g. cinder-backup is no longer at cs:cinder-backup and if upgrading must be
103# switched to cs:~openstack-charmers/cinder-backup
104CHARMSTORE_PATHS = {"cinder-backup": "cs:~openstack-charmers/cinder-backup"}
105JUJU_WORKING_STATES = "blocked|waiting|maint|error|hook|lost|executing"
106WATCH_COMMAND = "watch \"juju status|egrep '{}'\"".format(JUJU_WORKING_STATES)
107
108PRE_UPGRADE_STEPS = """
109This playbook assumes your current openstack revision is at the highest level of the current ubuntu series:
110 trusty-mitaka
111 xenial-queens
112 bionic-ussuri
113You can check 'juju config keystone openstack-origin' and 'juju config ceph-osd source' to verify.
114
115### IMPORTANT ###
116Be sure to regenerate this playbook each time you complete a section to ensure that the leaders in the plan match the current active leaders
117
118Perform these steps to prepare your environment/directory for the remaining playbook.
119
120# Login to MAAS WebUI and add the new series to MAAS under the Images tab.
121
122# Create "local" file for setting your apt preferences - Required for DRU to be non-interactive
123cat > local << EOF
124DPkg::options { "--force-confdef"; "--force-confnew"; }
125EOF
126
127# update the default-series of the model to the new series
128juju model-config default-series=bionic # or focal
129
130"""
131
132POST_UPGRADE_STEPS = """
133
134# POST UPGRADE ACTIONS
135Perform these steps after the cloud has completed upgrades
136
137# Remove the "local" file added to apt configs for DRU, as it's not correct for typical upgrades
138juju run --all 'sudo rm /etc/apt/apt.conf.d/local'
139
140N.B. Do not upgrade Nagios from xenial to bionic until all cloud nodes have completed upgrade to bionic.
141
142Upgrade all remaining machines (typically LMA stack) not previously captured:
143# find the units with:
144juju machines|grep <previous release|xenial|bionic|etc>
145
146# Check each of those machines that they're not already upgraded by listing /etc/os-release file
147juju run --machine <comma,separated,list,of,machines,left,at,old,version> 'hostname; grep VERSION_CODENAME /etc/os-release'
148
149# perform steps similiar to above for the remaining machines/applications.
150"""
151
152
def plan_parallel(model_apps, parallel_apps, machines_planned):
    """Plan a parallel Ubuntu series upgrade for all apps in *parallel_apps*.

    Collects pause/prepare/dist-upgrade/reboot/complete commands across the
    whole phase and prints them in batches, so all machines are upgraded
    together: leaders are prepared (and completed) before non-leader units.
    Machines already in *machines_planned* are skipped; every machine
    planned here is appended to it (caller's list is mutated).

    Relies on module globals: series, UCA, svc2charm and the *_APPS/
    *_TASKS constant tables.
    """
    cluster_pause_tasks = []
    pause_tasks = []
    leader_prepare_tasks = []
    non_leader_prepare_tasks = []
    upgrade_machines = []
    post_upgrade_tasks = []
    leader_complete_tasks = []
    non_leader_complete_tasks = []
    cleanup_tasks = []
    for app in parallel_apps:
        print(" juju set-series {} {}".format(app, series))
        leader = common.find_leader_for_app(app, model_apps)
        units = common.find_units_for_app(app, model_apps)
        units.remove(leader)

        post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
        manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
        if post_leader_action:
            cleanup_tasks.append(" juju run-action --wait {} {}".format(leader, post_leader_action))
        if manual_steps:
            cleanup_tasks.append(" ## Manual step: {}".format(manual_steps))

        for unit in units:
            unit_machine = common.find_machine_from_unit(app, unit, model_apps)
            if unit_machine in machines_planned:
                # CONSIDER: Note that we decided to skip dist-upgrade since it was already done?
                continue
            hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
            if hacluster_unit is not None:
                cluster_pause_tasks.append(" juju run-action --wait {} pause".format(hacluster_unit))
            if svc2charm[app] not in NO_PAUSE_APPS:
                pause_tasks.append(" juju run-action --wait {} pause".format(unit))
            non_leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(unit_machine, series, unit))
            non_leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(unit_machine, unit))
            upgrade_machines.append(unit_machine)
            machines_planned.append(unit_machine)

        leader_machine = common.find_machine_from_unit(app, leader, model_apps)
        if leader_machine in machines_planned:
            # leader machine already handled by an earlier app/phase
            continue
        # upgrade leader unit's machine
        leader_prepare_tasks.append(" juju upgrade-series {} prepare {} -y # {}".format(leader_machine, series, leader))
        upgrade_machines.append(leader_machine)
        leader_complete_tasks.append(" echo 'Check status of workload for {}'".format(leader))
        if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
            # ceph/percona/rabbitmq family track the UCA via 'source'
            post_upgrade_tasks.append(" juju config {} source={}".format(app, UCA))
        elif svc2charm[app] not in NO_ORIGIN:
            post_upgrade_tasks.append(" juju config {} openstack-origin={}".format(app,UCA))
        leader_complete_tasks.append(" juju upgrade-series {} complete # {}".format(leader_machine, leader))
        machines_planned.append(leader_machine)
    print(" # Pause non-leaders")
    print("\n".join(cluster_pause_tasks))
    print("\n".join(pause_tasks))
    print(" # Prepare leaders - These can be done in parallel before non-leaders")
    print("\n".join(leader_prepare_tasks))
    print(" # Prepare non-leaders - These can be done in parallel after leaders")
    print("\n".join(non_leader_prepare_tasks))
    # 'local' holds the dpkg conffile preferences needed for a
    # non-interactive do-release-upgrade (created in PRE_UPGRADE_STEPS)
    print(" for i in {}; do juju scp local $i: ; done".format(" ".join(upgrade_machines)))
    machine_spec = ",".join(upgrade_machines)
    print(" # Update all machines to latest patches")
    print(" ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step")
    print(" juju run --machine {} --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(machine_spec))
    print(" # Perform do-release-upgrade")
    print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(machine_spec))
    # repeated deliberately so the warning is hard to miss in the playbook
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot")
    print(" # Reboot all API machines into {}".format(series))
    print(" juju run --machine {} --timeout=10m 'sudo init 6'".format(machine_spec))
    print(" # Post Upgrade tasks")
    print("\n".join(post_upgrade_tasks))
    print(" # Complete leaders - These can all be done in parallel before non-leaders")
    print("\n".join(leader_complete_tasks))
    print(" # Complete non-leaders - These can all be done in parallel after leaders complete")
    print("\n".join(non_leader_complete_tasks))
    print(" # Post App Upgrade cleanup tasks")
    print("\n".join(cleanup_tasks))
    print("\n")
232
233
def main():
    """Entry point: print the phased Ubuntu series-upgrade playbook.

    Reads the 'jsfy' juju status dump from the current directory, then
    plans phase 1 as one parallel batch and the other phases app by app.
    """
    model_apps = common.get_model_apps("jsfy")
    # machines already planned, shared across phases so a machine hosting
    # several units is only dist-upgraded once
    machines_planned = []

    print(PRE_UPGRADE_STEPS)

    for phase in sorted(PHASE_APPS):
        print("Phase {}\n========".format(phase))
        this_phase_apps = []
        for app in PHASE_APPS[phase]:
            if 'landscape' in app:
                # landscape is intentionally excluded from the plan
                continue
            services = common.find_apps_from_charm(app, model_apps)
            this_phase_apps.extend(services)
            for service in services:
                # populate the module-level app -> charm cache
                svc2charm[service] = app
        if phase == 1:
            # phase 1 (API services) is upgraded as one parallel batch
            plan_parallel(model_apps, this_phase_apps, machines_planned)
            continue
        for app in this_phase_apps:
            plan_sequential(model_apps, app, machines_planned)

    print(POST_UPGRADE_STEPS)
257
258
def plan_sequential(model_apps, app, machines_planned):
    """Plan a one-unit-at-a-time Ubuntu series upgrade for *app*.

    The leader machine is upgraded first, then each non-leader machine in
    turn. Machines already listed in *machines_planned* skip the
    dist-upgrade steps; machines planned here are appended to that list
    (caller's list is mutated). Relies on module globals series, UCA and
    svc2charm.
    """
    post_leader_action = POST_APP_UPGRADE_ACTIONS_FOR_LEADERS.get(svc2charm[app], None)
    manual_steps = POST_APP_UPGRADE_MANUAL_STEPS.get(svc2charm[app], None)
    findlandscape = re.compile("landscape.*")
    if re.match(findlandscape, app) is not None:
        # landscape applications are intentionally skipped
        return
    print(" App: {}".format(app))
    leader = common.find_leader_for_app(app, model_apps)
    units = common.find_units_for_app(app, model_apps)
    units.remove(leader)
    unit_list = " ".join(units)
    print(" Units {} {}".format(leader, unit_list))
    print(" {}".format(WATCH_COMMAND))
    print(" juju set-series {} {}".format(app, series))
    # pause hacluster subordinates before pausing their principal units
    for unit in units:
        hacluster_unit = common.find_hacluster_for_unit(app, unit, model_apps)
        if hacluster_unit is not None:
            print(" juju run-action --wait {} pause".format(hacluster_unit))
    for unit in units:
        if svc2charm[app] not in NO_PAUSE_APPS:
            print(" juju run-action --wait {} pause".format(unit))
    # gate for machine already done
    leader_machine = common.find_machine_from_unit(app, leader, model_apps)
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        # upgrade leader unit's machine
        print(" juju upgrade-series {} prepare {} -y".format(leader_machine, series))
        # 'local' carries the dpkg conffile preferences for non-interactive DRU
        print(" juju scp local {}:".format(leader_machine))
        print(
            " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                leader_machine))
        print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(leader_machine))
        print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(leader_machine))
    print(" sleep 120; jw; echo 'Check status of workload for {}'".format(leader))
    if svc2charm[app] in ORIGIN_VAR_IS_SOURCE:
        # ceph/percona/rabbitmq family track the UCA via 'source'
        print(" juju config {} source={}".format(app, UCA))
    elif svc2charm[app] not in NO_ORIGIN:
        print(" juju config {} openstack-origin={}".format(app, UCA))
    print(" sleep 120; jw")
    if leader_machine in machines_planned:
        print(" # Skipping dist-upgrade as already done prior for {}".format(leader_machine))
    else:
        print(" juju upgrade-series {} complete".format(leader_machine))
        machines_planned.append(leader_machine)
    for unit in units:
        non_leader_machine = common.find_machine_from_unit(app, unit, model_apps)
        if non_leader_machine in machines_planned:
            print(" # Skipping dist-upgrade as already done prior for {}".format(non_leader_machine))
        else:
            print(" juju upgrade-series {} prepare {} -y".format(non_leader_machine, series))
            print(" juju scp local {}:".format(non_leader_machine))
            print(
                " juju run --machine {} --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'".format(
                    non_leader_machine))
            print(" juju run --machine {} --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'".format(
                non_leader_machine))
            print(" juju run --machine {} --timeout=5m 'sudo init 6'".format(non_leader_machine))
            print(" sleep 120; jw; echo 'Check status of workload for {}'".format(unit))
            print(" juju upgrade-series {} complete".format(non_leader_machine))
            machines_planned.append(non_leader_machine)
    if post_leader_action:
        print(" juju run-action --wait {} {}".format(leader, post_leader_action))
    if manual_steps:
        print(" ## Manual step: {}".format(manual_steps))
    print('\n')


if __name__ == "__main__":
    main()
diff --git a/setup.py b/setup.py
index 8c310d4..a9ecfa7 100644
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,8 @@ setuptools.setup(
48 "console_scripts": [48 "console_scripts": [
49 "cloud-upgrade-planner=cloud_upgrade_planner.cli:main",49 "cloud-upgrade-planner=cloud_upgrade_planner.cli:main",
50 "update-charm-revisions=cloud_upgrade_planner.cli:refresh",50 "update-charm-revisions=cloud_upgrade_planner.cli:refresh",
51 "ubuntu-series-upgrade-for-openstack=cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack:main",
52 "openstack-managed-upgrade=cloud_upgrade_planner.openstack_managed_upgrade:main",
51 ]53 ]
52 },54 },
53 setup_requires=["setuptools_scm"],55 setup_requires=["setuptools_scm"],
diff --git a/tests/test_cli.py b/tests/test_cli.py
54old mode 10075556old mode 100755
55new mode 10064457new mode 100644
diff --git a/tests/test_ubuntu_series_upgrade_for_openstack.py b/tests/test_ubuntu_series_upgrade_for_openstack.py
56new file mode 10064458new file mode 100644
index 0000000..8d80437
--- /dev/null
+++ b/tests/test_ubuntu_series_upgrade_for_openstack.py
@@ -0,0 +1,728 @@
1import pytest
2from unittest.mock import patch, mock_open, call
3import yaml
4
5from cloud_upgrade_planner import common as c
6from cloud_upgrade_planner import openstack_managed_upgrade as o
7from cloud_upgrade_planner import ubuntu_series_upgrade_for_openstack as u
8
9
# Canned `juju status --format=yaml` output (the `applications:` section only)
# used as the fixture model for every test in this module.  It mixes leaders
# and non-leaders, subordinates (hacluster-*, landscape-client, mysql-router),
# apps with and without `units:` / `can-upgrade-to:`, and local/branched charm
# origins so the planner's edge cases are all represented.
JUJU_FILE_MOCK = """
applications:
  aodh-mysql-router:
    can-upgrade-to: cs:mysql-router-11
    charm: cs:mysql-router-6
  easyrsa:
    can-upgrade-to: cs:~containers/easyrsa-408
    charm: cs:~containers/easyrsa-345
    units:
      easyrsa/0:
        leader: true
        machine: 7/lxd/3
  hacluster-neutron:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  hacluster-nova:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  hacluster-vault:
    can-upgrade-to: cs:hacluster-78
    charm: cs:hacluster-74
  landscape-client:
    charm: cs:landscape-client-35
  juju-lint:
    charm: local:focal/juju-lint-1
    units:
      juju-lint/0:
        leader: true
        machine: 8/lxd/15
  memcached:
    charm: cs:memcached-34
    units:
      memcached/1:
        leader: true
        machine: 5/lxd/4
  mysql-innodb-cluster:
    can-upgrade-to: cs:mysql-innodb-cluster-11
    charm: cs:mysql-innodb-cluster-5
    units:
      mysql-innodb-cluster/0:
        machine: 3/lxd/8
        subordinates:
          landscape-client/65: true
      mysql-innodb-cluster/1:
        leader: true
        machine: 4/lxd/8
        subordinates:
          landscape-client/66: true
      mysql-innodb-cluster/2:
        machine: 5/lxd/8
        subordinates:
          landscape-client/67: true
  neutron-api:
    can-upgrade-to: cs:neutron-api-299
    charm: cs:neutron-api-292
    units:
      neutron-api/0:
        machine: 6/lxd/5
        subordinates:
          hacluster-neutron/0: true
          landscape-client/69: true
          neutron-api-mysql-router/0: true
      neutron-api/1:
        machine: 7/lxd/6
        subordinates:
          hacluster-neutron/1: true
          landscape-client/68: true
          neutron-api-mysql-router/1: true
      neutron-api/2:
        leader: true
        machine: 8/lxd/5
        subordinates:
          hacluster-neutron/2: true
          landscape-client/70: true
          neutron-api-mysql-router/2: true
  nova-cloud-controller:
    can-upgrade-to: cs:nova-cloud-controller-358
    charm: cs:nova-cloud-controller-353
    units:
      nova-cloud-controller/0:
        machine: 6/lxd/6
        subordinates:
          hacluster-nova/0: true
          landscape-client/72: true
          nova-cloud-controller-mysql-router/0: true
      nova-cloud-controller/1:
        machine: 7/lxd/7
        subordinates:
          hacluster-nova/2: true
          landscape-client/73: true
          nova-cloud-controller-mysql-router/2: true
      nova-cloud-controller/2:
        leader: true
        machine: 8/lxd/6
        subordinates:
          hacluster-nova/1: true
          landscape-client/71: true
          nova-cloud-controller-mysql-router/1: true
  nova-cloud-controller-mysql-router:
    can-upgrade-to: cs:mysql-router-11
    charm: cs:mysql-router-6
  nova-compute-kvm:
    can-upgrade-to: cs:nova-compute-334
    charm: cs:nova-compute-325
    units:
      nova-compute-kvm/1:
        machine: '10'
        subordinates:
          landscape-client/19: true
      nova-compute-kvm/10:
        machine: '19'
        subordinates:
          landscape-client/10: true
      nova-compute-kvm/11:
        machine: '20'
        subordinates:
          landscape-client/20: true
      nova-compute-kvm/12:
        machine: '21'
        subordinates:
          landscape-client/11: true
      nova-compute-kvm/13:
        machine: '22'
        subordinates:
          landscape-client/4: true
      nova-compute-kvm/7:
        leader: true
        machine: '16'
        subordinates:
          landscape-client/7: true
      nova-compute-kvm/9:
        machine: '18'
        subordinates:
          landscape-client/16: true
  rabbitmq-server:
    can-upgrade-to: cs:~llama-charmers-next/rabbitmq-server-6
    charm: cs:~llama-charmers-next/rabbitmq-server-5
    units:
      rabbitmq-server/4:
        machine: 6/lxd/12
      rabbitmq-server/5:
        leader: true
        machine: 7/lxd/12
      rabbitmq-server/6:
        machine: 8/lxd/11
  vault:
    can-upgrade-to: cs:vault-50
    charm: cs:vault-44
    units:
      vault/0:
        machine: '0'
        subordinates:
          hacluster-vault/1: true
          landscape-client/24: true
      vault/1:
        leader: true
        machine: '1'
        subordinates:
          hacluster-vault/2: true
          landscape-client/23: true
      vault/2:
        machine: '2'
        subordinates:
          hacluster-vault/0: true
          landscape-client/22: true
"""
# Parsed fixture: application name -> application status mapping.
JUJU_FILE_DICT = yaml.safe_load(JUJU_FILE_MOCK)["applications"]

# Expected application-name -> charm-name mapping for the fixture model
# (what common.render_app_to_charm_dict should produce from JUJU_FILE_DICT).
SVC2CHARM = {
    "aodh-mysql-router": "mysql-router",
    "easyrsa": "easyrsa",
    "hacluster-neutron": "hacluster",
    "hacluster-nova": "hacluster",
    "hacluster-vault": "hacluster",
    "juju-lint": "juju-lint",
    "landscape-client": "landscape-client",
    "memcached": "memcached",
    "mysql-innodb-cluster": "mysql-innodb-cluster",
    "neutron-api": "neutron-api",
    "nova-cloud-controller": "nova-cloud-controller",
    "nova-cloud-controller-mysql-router": "mysql-router",
    "nova-compute-kvm": "nova-compute",
    "rabbitmq-server": "rabbitmq-server",
    "vault": "vault",
}
195
196
class TestCommon:
    """Tests for the helper routines in cloud_upgrade_planner.common.

    Every test runs against the canned juju status fixture parsed into
    JUJU_FILE_DICT at module scope.
    """

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("memcached", "memcached/1"),
            ("mysql-innodb-cluster", "mysql-innodb-cluster/1"),
            ("neutron-api", "neutron-api/2"),
        ],
    )
    def test_find_leader_for_app(self, app, expected):
        """The unit flagged `leader: true` is reported for each application."""
        assert c.find_leader_for_app(app, JUJU_FILE_DICT) == expected

    def test_get_model_apps_missing_file(self):
        """An unreadable juju status file terminates the program."""
        with pytest.raises(SystemExit):
            c.get_model_apps("")

    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_get_model_apps_provided_file(self, juju_file):
        """A readable status file is parsed into its applications mapping."""
        assert c.get_model_apps("test") == JUJU_FILE_DICT

    @pytest.mark.parametrize(
        "charms,expected",
        [
            (
                ["mysql-router", "nova-compute", "hacluster", "vault"],
                [
                    "aodh-mysql-router",
                    "nova-cloud-controller-mysql-router",
                    "nova-compute-kvm",
                    "hacluster-neutron",
                    "hacluster-nova",
                    "hacluster-vault",
                    "vault",
                ],
            ),
            (["non-existing-charm"], []),
        ],
    )
    def test_find_charms_apps_in_model(self, charms, expected):
        """Every application backed by one of the given charms is listed."""
        assert c.find_charms_apps_in_model(charms, JUJU_FILE_DICT, SVC2CHARM) == expected

    @pytest.mark.parametrize(
        "app,expected",
        [
            (
                "nova-compute-kvm",
                [
                    "nova-compute-kvm/1",
                    "nova-compute-kvm/10",
                    "nova-compute-kvm/11",
                    "nova-compute-kvm/12",
                    "nova-compute-kvm/13",
                    "nova-compute-kvm/7",
                    "nova-compute-kvm/9",
                ],
            ),
            (
                "mysql-innodb-cluster",
                [
                    "mysql-innodb-cluster/0",
                    "mysql-innodb-cluster/1",
                    "mysql-innodb-cluster/2",
                ],
            ),
        ],
    )
    def test_find_units_for_app(self, app, expected):
        """All units of an application are returned."""
        assert c.find_units_for_app(app, JUJU_FILE_DICT) == expected

    @pytest.mark.parametrize(
        "charm,expected",
        [
            ("neutron-api", ["neutron-api"]),
            ("nova-compute", ["nova-compute-kvm"]),
            (
                "mysql-router",
                ["aodh-mysql-router", "nova-cloud-controller-mysql-router"],
            ),
        ],
    )
    def test_find_apps_from_charm(self, charm, expected):
        """Applications are resolved from the charm they were deployed from."""
        assert c.find_apps_from_charm(charm, JUJU_FILE_DICT) == expected

    def test_find_units_for_app_not_in_phase(self):
        """An application without a `units:` section raises KeyError."""
        with pytest.raises(KeyError):
            c.find_units_for_app("nova-cloud-controller-mysql-router", JUJU_FILE_DICT)

    def test_find_units_for_app_missing(self):
        """An unknown application name raises KeyError."""
        with pytest.raises(KeyError):
            c.find_units_for_app("appx", JUJU_FILE_DICT)

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-compute-kvm", "nova-compute-kvm/9", "18"),
            ("memcached", "memcached/1", "5/lxd/4"),
        ],
    )
    def test_find_machine_from_unit(self, app, unit, expected):
        """A unit maps back to the machine it is placed on."""
        assert c.find_machine_from_unit(app, unit, JUJU_FILE_DICT) == expected

    def test_render_app_to_charm_dict(self):
        """The app-name to charm-name map is derived from the model status."""
        assert c.render_app_to_charm_dict(JUJU_FILE_DICT) == SVC2CHARM

    @pytest.mark.parametrize(
        "app,expected",
        [
            ("aodh-mysql-router", True),
            ("memcached", False),
        ],
    )
    def test_available_upgrade(self, app, expected):
        """`can-upgrade-to:` in the status flags an available charm upgrade."""
        assert c.available_upgrade(app, JUJU_FILE_DICT) == expected

    @pytest.mark.parametrize(
        "app,unit,expected",
        [
            ("nova-cloud-controller", "nova-cloud-controller/1", "hacluster-nova/2"),
            ("neutron-api", "neutron-api/2", "hacluster-neutron/2"),
            ("nova-compute-kvm", "nova-compute-kvm/1", None),  # missing HA
            ("memcached", "memcached/1", None),  # missing subordinates
        ],
    )
    def test_find_hacluster_for_unit(self, app, unit, expected):
        """The hacluster subordinate attached to a unit is located, if any."""
        assert c.find_hacluster_for_unit(app, unit, JUJU_FILE_DICT) == expected
331
332
class TestOpenstackManagedUpgrade:
    """Tests for cloud_upgrade_planner.openstack_managed_upgrade plan output.

    Each test patches builtins.print and compares the exact sequence of
    printed plan lines against the expected juju commands.  The planner
    functions' return values are not part of the contract under test, so
    they are deliberately not captured (previously they were assigned to
    an unused `output` local).
    """

    @patch("builtins.print")
    @patch(
        "cloud_upgrade_planner.common.find_units_for_app",
        return_value=["neutron-api/0", "neutron-api/1", "neutron-api/2"],
    )
    @patch(
        "cloud_upgrade_planner.common.find_leader_for_app", return_value="neutron-api/2"
    )
    def test_plan_action_managed_phase_app(self, mock_leader, mock_units, mock_stdout):
        """The leader unit is upgraded first, then the remaining units in order."""
        o.plan_action_managed_phase_app("test-app", JUJU_FILE_DICT, "test-release")
        calls = [
            call(
                " juju config test-app action-managed-upgrade=true openstack-origin=test-release"
            ),
            call(" {}".format(o.WATCH_COMMAND)),
            call(" juju run-action --wait neutron-api/2 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/0 openstack-upgrade"),
            call(" juju run-action --wait neutron-api/1 openstack-upgrade"),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_local_path(self, mock_stdout):
        """A local: charm warns and suggests --switch to the promulgated cs: charm."""
        o.plan_charm_upgrade("juju-lint", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application juju-lint has local charm path local:focal/juju-lint-1\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in local charm are in upstream charm before\n running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm juju-lint --switch cs:juju-lint"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_cs_path(self, mock_stdout):
        """A plain cs: charm gets a straightforward upgrade-charm command."""
        o.plan_charm_upgrade("neutron-api", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(" juju upgrade-charm neutron-api"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    def test_plan_charm_upgrade_tilde(self, mock_stdout):
        """A cs:~owner branched charm warns and switches to the promulgated one."""
        o.plan_charm_upgrade("easyrsa", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application easyrsa from unrecognized path cs:~containers/easyrsa-345\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call(" juju upgrade-charm easyrsa --switch cs:easyrsa"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls

    @patch("builtins.print")
    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_plan_charm_upgrade_deferred(self, defer_file, mock_stdout):
        """Charms with deferred restarts also get a generated --config override file."""
        o.plan_charm_upgrade("rabbitmq-server", JUJU_FILE_DICT, SVC2CHARM)
        calls = [
            call(
                "\n WARNING!!! Application rabbitmq-server from unrecognized path cs:~llama-charmers-next/rabbitmq-server-5\n Suggesting switch to latest promulgated cs: version.\n Please ensure updates in the above branched charm are in the latest\n promulgated charm before running upgrade-charm on this application.\n "
            ),
            call("rabbitmq-server:\n enable-auto-restarts: False", file=defer_file()),
            call(
                " juju upgrade-charm rabbitmq-server --switch cs:rabbitmq-server --config deferred_restart_config_rabbitmq-server.yaml"
            ),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\"\n\n"
            ),
        ]
        assert mock_stdout.mock_calls == calls
412
class TestUbuntuSeriesUpgrade:
    """End-to-end tests for ubuntu_series_upgrade_for_openstack.main().

    The plan is pure text emitted via print(), so these tests patch
    builtins.print and compare the emitted lines call-for-call against the
    expected juju upgrade-series runbook for the fixture model.
    """

    def test_main_missing_juju_file(self):
        """main() exits when the juju status file cannot be read."""
        with pytest.raises(SystemExit):
            u.main()

    @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_sequential")
    @patch("cloud_upgrade_planner.ubuntu_series_upgrade_for_openstack.plan_parallel")
    @patch(
        "cloud_upgrade_planner.common.find_apps_from_charm", return_value=["app"]
    )
    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_main_with_juju_file(
        self, juju_file, mock_apps_from_charm, mock_parallel, mock_sequential
    ):
        """Apps are resolved per phase and dispatched to the right planner."""
        u.main()

        # Phases 0 and 1 resolve their charm lists against the model.
        charm_calls = []
        for phase in range(2):
            for app in u.PHASE_APPS[phase]:
                charm_calls.append(call(app, JUJU_FILE_DICT))
        mock_apps_from_charm.assert_has_calls(charm_calls)

        # Phase 1 apps are planned as a single parallel batch.
        parallel_apps_no = len(u.PHASE_APPS[1])
        mock_parallel.assert_called_once_with(
            JUJU_FILE_DICT, ["app"] * parallel_apps_no, []
        )

        # Phase 0 and phase 2 apps are planned one application at a time.
        sequential_apps_no = len(u.PHASE_APPS[0]) + len(u.PHASE_APPS[2])
        seq_calls = [call(JUJU_FILE_DICT, "app", []) for _ in range(sequential_apps_no)]
        mock_sequential.assert_has_calls(seq_calls)

    @patch("builtins.print")
    @patch("builtins.open", new_callable=mock_open, read_data=JUJU_FILE_MOCK)
    def test_main_output(self, juju_file, mock_stdout):
        """The complete printed plan for the fixture model matches line-for-line."""
        u.main()
        calls = [
            call(u.PRE_UPGRADE_STEPS),
            # Phase 0: sequential upgrades, leader first (rabbitmq, vault).
            call("Phase 0\n========"),
            call(" App: rabbitmq-server"),
            call(" Units rabbitmq-server/5 rabbitmq-server/4 rabbitmq-server/6"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\""
            ),
            call(" juju set-series rabbitmq-server bionic"),
            call(" juju run-action --wait rabbitmq-server/4 pause"),
            call(" juju run-action --wait rabbitmq-server/6 pause"),
            # Leader rabbitmq-server/5 on machine 7/lxd/12.
            call(" juju upgrade-series 7/lxd/12 prepare bionic -y"),
            call(" juju scp local 7/lxd/12:"),
            call(
                " juju run --machine 7/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 7/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 7/lxd/12 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/5'"
            ),
            call(" juju config rabbitmq-server source=distro"),
            call(" sleep 120; jw"),
            call(" juju upgrade-series 7/lxd/12 complete"),
            # Non-leader rabbitmq-server/4 on machine 6/lxd/12.
            call(" juju upgrade-series 6/lxd/12 prepare bionic -y"),
            call(" juju scp local 6/lxd/12:"),
            call(
                " juju run --machine 6/lxd/12 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 6/lxd/12 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 6/lxd/12 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/4'"
            ),
            call(" juju upgrade-series 6/lxd/12 complete"),
            # Non-leader rabbitmq-server/6 on machine 8/lxd/11.
            call(" juju upgrade-series 8/lxd/11 prepare bionic -y"),
            call(" juju scp local 8/lxd/11:"),
            call(
                " juju run --machine 8/lxd/11 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 8/lxd/11 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 8/lxd/11 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for rabbitmq-server/6'"
            ),
            call(" juju upgrade-series 8/lxd/11 complete"),
            call(
                " juju run-action --wait rabbitmq-server/5 complete-cluster-series-upgrade"
            ),
            call("\n"),
            call(" App: vault"),
            call(" Units vault/1 vault/0 vault/2"),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\""
            ),
            call(" juju set-series vault bionic"),
            call(" juju run-action --wait hacluster-vault/1 pause"),
            call(" juju run-action --wait hacluster-vault/0 pause"),
            call(" juju run-action --wait vault/0 pause"),
            call(" juju run-action --wait vault/2 pause"),
            # Leader vault/1 on machine 1.
            call(" juju upgrade-series 1 prepare bionic -y"),
            call(" juju scp local 1:"),
            call(
                " juju run --machine 1 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 1 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 1 --timeout=5m 'sudo init 6'"),
            call(" sleep 120; jw; echo 'Check status of workload for vault/1'"),
            call(" sleep 120; jw"),
            call(" juju upgrade-series 1 complete"),
            # Non-leader vault/0 on machine 0.
            call(" juju upgrade-series 0 prepare bionic -y"),
            call(" juju scp local 0:"),
            call(
                " juju run --machine 0 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 0 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 0 --timeout=5m 'sudo init 6'"),
            call(" sleep 120; jw; echo 'Check status of workload for vault/0'"),
            call(" juju upgrade-series 0 complete"),
            # Non-leader vault/2 on machine 2.
            call(" juju upgrade-series 2 prepare bionic -y"),
            call(" juju scp local 2:"),
            call(
                " juju run --machine 2 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 2 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 2 --timeout=5m 'sudo init 6'"),
            call(" sleep 120; jw; echo 'Check status of workload for vault/2'"),
            call(" juju upgrade-series 2 complete"),
            call(
                " ## Manual step: You will need to unseal the vault after each unit reboots"
            ),
            call("\n"),
            # Phase 1: API/control-plane apps batched in parallel.
            call("Phase 1\n========"),
            call(" juju set-series neutron-api bionic"),
            call(" juju set-series nova-cloud-controller bionic"),
            call(" juju set-series memcached bionic"),
            call(" juju set-series easyrsa bionic"),
            call(" # Pause non-leaders"),
            call(
                " juju run-action --wait hacluster-neutron/0 pause\n juju run-action --wait hacluster-neutron/1 pause\n juju run-action --wait hacluster-nova/0 pause\n juju run-action --wait hacluster-nova/2 pause"
            ),
            call(
                " juju run-action --wait neutron-api/0 pause\n juju run-action --wait neutron-api/1 pause\n juju run-action --wait nova-cloud-controller/0 pause\n juju run-action --wait nova-cloud-controller/1 pause"
            ),
            call(
                " # Prepare leaders - These can be done in parallel before non-leaders"
            ),
            call(
                " juju upgrade-series 8/lxd/5 prepare bionic -y # neutron-api/2\n juju upgrade-series 8/lxd/6 prepare bionic -y # nova-cloud-controller/2\n juju upgrade-series 5/lxd/4 prepare bionic -y # memcached/1\n juju upgrade-series 7/lxd/3 prepare bionic -y # easyrsa/0"
            ),
            call(
                " # Prepare non-leaders - These can be done in parallel after leaders"
            ),
            call(
                " juju upgrade-series 6/lxd/5 prepare bionic -y # neutron-api/0\n juju upgrade-series 7/lxd/6 prepare bionic -y # neutron-api/1\n juju upgrade-series 6/lxd/6 prepare bionic -y # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 prepare bionic -y # nova-cloud-controller/1"
            ),
            call(
                " for i in 6/lxd/5 7/lxd/6 8/lxd/5 6/lxd/6 7/lxd/7 8/lxd/6 5/lxd/4 7/lxd/3; do juju scp local $i: ; done"
            ),
            call(" # Update all machines to latest patches"),
            call(
                " ### IF YOU HAVE A VERSION OF JUJU THAT SHOWS 'action terminated' on some units when running against multiple units, upgrade juju agents before this step"
            ),
            call(
                " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=20m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(" # Perform do-release-upgrade"),
            call(
                " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(
                " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot"
            ),
            call(
                " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot"
            ),
            call(
                " #### Check above and reboot and re-run d-r-u for any machines that did not upgrade due to needing reboot"
            ),
            call(" # Reboot all API machines into bionic"),
            call(
                " juju run --machine 6/lxd/5,7/lxd/6,8/lxd/5,6/lxd/6,7/lxd/7,8/lxd/6,5/lxd/4,7/lxd/3 --timeout=10m 'sudo init 6'"
            ),
            call(" # Post Upgrade tasks"),
            call(
                " juju config neutron-api openstack-origin=distro\n juju config nova-cloud-controller openstack-origin=distro\n juju config memcached openstack-origin=distro\n juju config easyrsa openstack-origin=distro"
            ),
            call(
                " # Complete leaders - These can all be done in parallel before non-leaders"
            ),
            call(
                " echo 'Check status of workload for neutron-api/2'\n juju upgrade-series 8/lxd/5 complete # neutron-api/2\n echo 'Check status of workload for nova-cloud-controller/2'\n juju upgrade-series 8/lxd/6 complete # nova-cloud-controller/2\n echo 'Check status of workload for memcached/1'\n juju upgrade-series 5/lxd/4 complete # memcached/1\n echo 'Check status of workload for easyrsa/0'\n juju upgrade-series 7/lxd/3 complete # easyrsa/0"
            ),
            call(
                " # Complete non-leaders - These can all be done in parallel after leaders complete"
            ),
            call(
                " juju upgrade-series 6/lxd/5 complete # neutron-api/0\n juju upgrade-series 7/lxd/6 complete # neutron-api/1\n juju upgrade-series 6/lxd/6 complete # nova-cloud-controller/0\n juju upgrade-series 7/lxd/7 complete # nova-cloud-controller/1"
            ),
            call(" # Post App Upgrade cleanup tasks"),
            call(""),
            call("\n"),
            # Phase 2: hypervisors, sequential, leader first.
            call("Phase 2\n========"),
            call(" App: nova-compute-kvm"),
            call(
                " Units nova-compute-kvm/7 nova-compute-kvm/1 nova-compute-kvm/10 nova-compute-kvm/11 nova-compute-kvm/12 nova-compute-kvm/13 nova-compute-kvm/9"
            ),
            call(
                " watch \"juju status|egrep 'blocked|waiting|maint|error|hook|lost|executing'\""
            ),
            call(" juju set-series nova-compute-kvm bionic"),
            # Leader nova-compute-kvm/7 on machine 16.
            call(" juju upgrade-series 16 prepare bionic -y"),
            call(" juju scp local 16:"),
            call(
                " juju run --machine 16 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 16 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 16 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/7'"
            ),
            call(" juju config nova-compute-kvm openstack-origin=distro"),
            call(" sleep 120; jw"),
            call(" juju upgrade-series 16 complete"),
            # Non-leaders follow one machine at a time.
            call(" juju upgrade-series 10 prepare bionic -y"),
            call(" juju scp local 10:"),
            call(
                " juju run --machine 10 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 10 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 10 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/1'"
            ),
            call(" juju upgrade-series 10 complete"),
            call(" juju upgrade-series 19 prepare bionic -y"),
            call(" juju scp local 19:"),
            call(
                " juju run --machine 19 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 19 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 19 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/10'"
            ),
            call(" juju upgrade-series 19 complete"),
            call(" juju upgrade-series 20 prepare bionic -y"),
            call(" juju scp local 20:"),
            call(
                " juju run --machine 20 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 20 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 20 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/11'"
            ),
            call(" juju upgrade-series 20 complete"),
            call(" juju upgrade-series 21 prepare bionic -y"),
            call(" juju scp local 21:"),
            call(
                " juju run --machine 21 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 21 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 21 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/12'"
            ),
            call(" juju upgrade-series 21 complete"),
            call(" juju upgrade-series 22 prepare bionic -y"),
            call(" juju scp local 22:"),
            call(
                " juju run --machine 22 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 22 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 22 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/13'"
            ),
            call(" juju upgrade-series 22 complete"),
            call(" juju upgrade-series 18 prepare bionic -y"),
            call(" juju scp local 18:"),
            call(
                " juju run --machine 18 --timeout=60m 'sudo service cron stop; sudo service landscape-client stop; sudo cp /home/ubuntu/local /etc/apt/apt.conf.d/local; sudo apt-get update; sudo apt-get dist-upgrade -y; sudo apt-get autoremove -y; sudo rm /var/run/reboot-required /var/run/reboot-required.pkgs'"
            ),
            call(
                " juju run --machine 18 --timeout=60m 'sudo do-release-upgrade -f DistUpgradeViewNonInteractive'"
            ),
            call(" juju run --machine 18 --timeout=5m 'sudo init 6'"),
            call(
                " sleep 120; jw; echo 'Check status of workload for nova-compute-kvm/9'"
            ),
            call(" juju upgrade-series 18 complete"),
            call("\n"),
            call(u.POST_UPGRADE_STEPS),
        ]

        assert mock_stdout.mock_calls == calls

Subscribers

People subscribed via source and target branches