Merge lp:~corey.bryant/charms/trusty/cinder/remove-missing into lp:~openstack-charmers-archive/charms/trusty/cinder/next
- Trusty Tahr (14.04)
- remove-missing
- Merge into next
Status: Merged
Merged at revision: 56
Proposed branch: lp:~corey.bryant/charms/trusty/cinder/remove-missing
Merge into: lp:~openstack-charmers-archive/charms/trusty/cinder/next
Diff against target: 251 lines (+62/-16), 5 files modified:
config.yaml (+12/-2), hooks/cinder_hooks.py (+2/-1), hooks/cinder_utils.py (+19/-2), unit_tests/test_cinder_hooks.py (+3/-2), unit_tests/test_cinder_utils.py (+26/-9)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/cinder/remove-missing
Related bugs: none listed
Reviewer | Review Type | Date Requested | Status
---|---|---|---
James Page | | | Needs Fixing
OpenStack Charmers | | | Pending
Review via email: mp+237439@code.launchpad.net
Commit message
Description of the change
OIL has hit failures where physical volumes (from a previous deployment) are missing from the volume group on the current deployment, and vgextend fails. This patch fixes that by adding an option that requests that missing physical volumes be removed from the volume group.
James Page (james-page): [comment body not captured in this extract]
James Page (james-page) wrote: [comment body not captured in this extract]
James Page (james-page) wrote:
=======
FAIL: It writes out all config
-------
Traceback (most recent call last):
  File "/usr/lib/… [path truncated in original], in patched
    return func(*args, **keywargs)
  File "/home/… [path truncated in original], in test_config_changed
    False)
  File "/usr/lib/… [path truncated in original], in assert_called_with
    raise AssertionError(msg)
AssertionError: Expected call: configure_lvm_storage(… [arguments truncated in original])
Actual call: configure_lvm_storage(… [arguments truncated in original])
=======
FAIL: It writes out all config
-------
Traceback (most recent call last):
  File "/usr/lib/… [path truncated in original], in patched
    return func(*args, **keywargs)
  File "/home/… [path truncated in original], in test_config_changed_block_devices
    True)
  File "/usr/lib/… [path truncated in original], in assert_called_with
    raise AssertionError(msg)
AssertionError: Expected call: configure_lvm_storage(… [arguments truncated in original])
Actual call: configure_lvm_storage(… [arguments truncated in original])
James Page (james-page) wrote : | # |
Probably worth adding a unit test to ensure that switching the config has the desired effect as well.
Preview Diff
1 | === modified file 'config.yaml' | |||
2 | --- config.yaml 2014-10-08 10:42:20 +0000 | |||
3 | +++ config.yaml 2014-10-10 20:37:29 +0000 | |||
4 | @@ -27,12 +27,16 @@ | |||
5 | 27 | description: | | 27 | description: | |
6 | 28 | The block devices on which to create LVM volume group. | 28 | The block devices on which to create LVM volume group. |
7 | 29 | 29 | ||
9 | 30 | May also be set to None for deployments that will not need local | 30 | May be set to None for deployments that will not need local |
10 | 31 | storage (eg, Ceph/RBD-backed volumes). | 31 | storage (eg, Ceph/RBD-backed volumes). |
11 | 32 | 32 | ||
12 | 33 | This can also be a space delimited list of block devices to attempt | 33 | This can also be a space delimited list of block devices to attempt |
13 | 34 | to use in the cinder LVM volume group - each block device detected | 34 | to use in the cinder LVM volume group - each block device detected |
14 | 35 | will be added to the available physical volumes in the volume group. | 35 | will be added to the available physical volumes in the volume group. |
15 | 36 | |||
16 | 37 | May be set to the path and size of a local file | ||
17 | 38 | (/path/to/file.img|$sizeG), which will be created and used as a | ||
18 | 39 | loopback device (for testing only). $sizeG defaults to 5G | ||
19 | 36 | ceph-osd-replication-count: | 40 | ceph-osd-replication-count: |
20 | 37 | default: 3 | 41 | default: 3 |
21 | 38 | type: int | 42 | type: int |
22 | @@ -51,8 +55,14 @@ | |||
23 | 51 | default: "false" | 55 | default: "false" |
24 | 52 | type: string | 56 | type: string |
25 | 53 | description: | | 57 | description: | |
27 | 54 | If true, charm will attempt to overwrite block devices containin | 58 | If true, charm will attempt to overwrite block devices containing |
28 | 55 | previous filesystems or LVM, assuming it is not in use. | 59 | previous filesystems or LVM, assuming it is not in use. |
29 | 60 | remove-missing: | ||
30 | 61 | default: False | ||
31 | 62 | type: boolean | ||
32 | 63 | description: | | ||
33 | 64 | If True, charm will attempt to remove missing physical volumes from | ||
34 | 65 | volume group, if logical volumes are not allocated on them. | ||
35 | 56 | database-user: | 66 | database-user: |
36 | 57 | default: cinder | 67 | default: cinder |
37 | 58 | type: string | 68 | type: string |
38 | 59 | 69 | ||
39 | === modified file 'hooks/cinder_hooks.py' | |||
40 | --- hooks/cinder_hooks.py 2014-10-01 22:15:34 +0000 | |||
41 | +++ hooks/cinder_hooks.py 2014-10-10 20:37:29 +0000 | |||
42 | @@ -105,7 +105,8 @@ | |||
43 | 105 | block_devices = conf['block-device'].split() | 105 | block_devices = conf['block-device'].split() |
44 | 106 | configure_lvm_storage(block_devices, | 106 | configure_lvm_storage(block_devices, |
45 | 107 | conf['volume-group'], | 107 | conf['volume-group'], |
47 | 108 | conf['overwrite'] in ['true', 'True', True]) | 108 | conf['overwrite'] in ['true', 'True', True], |
48 | 109 | conf['remove-missing']) | ||
49 | 109 | 110 | ||
50 | 110 | if openstack_upgrade_available('cinder-common'): | 111 | if openstack_upgrade_available('cinder-common'): |
51 | 111 | do_openstack_upgrade(configs=CONFIGS) | 112 | do_openstack_upgrade(configs=CONFIGS) |
52 | 112 | 113 | ||
53 | === modified file 'hooks/cinder_utils.py' | |||
54 | --- hooks/cinder_utils.py 2014-10-07 12:27:23 +0000 | |||
55 | +++ hooks/cinder_utils.py 2014-10-10 20:37:29 +0000 | |||
56 | @@ -267,9 +267,19 @@ | |||
57 | 267 | return list(set(_services)) | 267 | return list(set(_services)) |
58 | 268 | 268 | ||
59 | 269 | 269 | ||
60 | 270 | def reduce_lvm_volume_group_missing(volume_group): | ||
61 | 271 | ''' | ||
62 | 272 | Remove all missing physical volumes from the volume group, if there | ||
63 | 273 | are no logical volumes allocated on them. | ||
64 | 274 | |||
65 | 275 | :param volume_group: str: Name of volume group to reduce. | ||
66 | 276 | ''' | ||
67 | 277 | subprocess.check_call(['vgreduce', '--removemissing', volume_group]) | ||
68 | 278 | |||
69 | 279 | |||
70 | 270 | def extend_lvm_volume_group(volume_group, block_device): | 280 | def extend_lvm_volume_group(volume_group, block_device): |
71 | 271 | ''' | 281 | ''' |
73 | 272 | Extend and LVM volume group onto a given block device. | 282 | Extend an LVM volume group onto a given block device. |
74 | 273 | 283 | ||
75 | 274 | Assumes block device has already been initialized as an LVM PV. | 284 | Assumes block device has already been initialized as an LVM PV. |
76 | 275 | 285 | ||
77 | @@ -279,13 +289,16 @@ | |||
78 | 279 | subprocess.check_call(['vgextend', volume_group, block_device]) | 289 | subprocess.check_call(['vgextend', volume_group, block_device]) |
79 | 280 | 290 | ||
80 | 281 | 291 | ||
82 | 282 | def configure_lvm_storage(block_devices, volume_group, overwrite=False): | 292 | def configure_lvm_storage(block_devices, volume_group, overwrite=False, |
83 | 293 | remove_missing=False): | ||
84 | 283 | ''' Configure LVM storage on the list of block devices provided | 294 | ''' Configure LVM storage on the list of block devices provided |
85 | 284 | 295 | ||
86 | 285 | :param block_devices: list: List of whitelisted block devices to detect | 296 | :param block_devices: list: List of whitelisted block devices to detect |
87 | 286 | and use if found | 297 | and use if found |
88 | 287 | :param overwrite: bool: Scrub any existing block data if block device is | 298 | :param overwrite: bool: Scrub any existing block data if block device is |
89 | 288 | not already in-use | 299 | not already in-use |
90 | 300 | :param remove_missing: bool: Remove missing physical volumes from volume | ||
91 | 301 | group if logical volume not allocated on them | ||
92 | 289 | ''' | 302 | ''' |
93 | 290 | devices = [] | 303 | devices = [] |
94 | 291 | for block_device in block_devices: | 304 | for block_device in block_devices: |
95 | @@ -320,6 +333,10 @@ | |||
96 | 320 | create_lvm_volume_group(volume_group, new_devices[0]) | 333 | create_lvm_volume_group(volume_group, new_devices[0]) |
97 | 321 | new_devices.remove(new_devices[0]) | 334 | new_devices.remove(new_devices[0]) |
98 | 322 | 335 | ||
99 | 336 | # Remove missing physical volumes from volume group | ||
100 | 337 | if remove_missing: | ||
101 | 338 | reduce_lvm_volume_group_missing(volume_group) | ||
102 | 339 | |||
103 | 323 | if len(new_devices) > 0: | 340 | if len(new_devices) > 0: |
104 | 324 | # Extend the volume group as required | 341 | # Extend the volume group as required |
105 | 325 | for new_device in new_devices: | 342 | for new_device in new_devices: |
106 | 326 | 343 | ||
107 | === modified file 'unit_tests/test_cinder_hooks.py' | |||
108 | --- unit_tests/test_cinder_hooks.py 2014-10-08 10:53:24 +0000 | |||
109 | +++ unit_tests/test_cinder_hooks.py 2014-10-10 20:37:29 +0000 | |||
110 | @@ -115,7 +115,7 @@ | |||
111 | 115 | self.assertTrue(conf_https.called) | 115 | self.assertTrue(conf_https.called) |
112 | 116 | self.configure_lvm_storage.assert_called_with(['sdb'], | 116 | self.configure_lvm_storage.assert_called_with(['sdb'], |
113 | 117 | 'cinder-volumes', | 117 | 'cinder-volumes', |
115 | 118 | False) | 118 | False, False) |
116 | 119 | 119 | ||
117 | 120 | @patch.object(hooks, 'configure_https') | 120 | @patch.object(hooks, 'configure_https') |
118 | 121 | def test_config_changed_block_devices(self, conf_https): | 121 | def test_config_changed_block_devices(self, conf_https): |
119 | @@ -124,13 +124,14 @@ | |||
120 | 124 | self.test_config.set('block-device', 'sdb /dev/sdc sde') | 124 | self.test_config.set('block-device', 'sdb /dev/sdc sde') |
121 | 125 | self.test_config.set('volume-group', 'cinder-new') | 125 | self.test_config.set('volume-group', 'cinder-new') |
122 | 126 | self.test_config.set('overwrite', 'True') | 126 | self.test_config.set('overwrite', 'True') |
123 | 127 | self.test_config.set('remove-missing', True) | ||
124 | 127 | hooks.hooks.execute(['hooks/config-changed']) | 128 | hooks.hooks.execute(['hooks/config-changed']) |
125 | 128 | self.assertTrue(self.CONFIGS.write_all.called) | 129 | self.assertTrue(self.CONFIGS.write_all.called) |
126 | 129 | self.assertTrue(conf_https.called) | 130 | self.assertTrue(conf_https.called) |
127 | 130 | self.configure_lvm_storage.assert_called_with( | 131 | self.configure_lvm_storage.assert_called_with( |
128 | 131 | ['sdb', '/dev/sdc', 'sde'], | 132 | ['sdb', '/dev/sdc', 'sde'], |
129 | 132 | 'cinder-new', | 133 | 'cinder-new', |
131 | 133 | True) | 134 | True, True) |
132 | 134 | 135 | ||
133 | 135 | @patch.object(hooks, 'configure_https') | 136 | @patch.object(hooks, 'configure_https') |
134 | 136 | def test_config_changed_upgrade_available(self, conf_https): | 137 | def test_config_changed_upgrade_available(self, conf_https): |
135 | 137 | 138 | ||
136 | === modified file 'unit_tests/test_cinder_utils.py' | |||
137 | --- unit_tests/test_cinder_utils.py 2014-04-14 14:41:11 +0000 | |||
138 | +++ unit_tests/test_cinder_utils.py 2014-10-10 20:37:29 +0000 | |||
139 | @@ -211,11 +211,12 @@ | |||
140 | 211 | ('/mnt/loop0', cinder_utils.DEFAULT_LOOPBACK_SIZE)) | 211 | ('/mnt/loop0', cinder_utils.DEFAULT_LOOPBACK_SIZE)) |
141 | 212 | 212 | ||
142 | 213 | @patch.object(cinder_utils, 'clean_storage') | 213 | @patch.object(cinder_utils, 'clean_storage') |
143 | 214 | @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing') | ||
144 | 214 | @patch.object(cinder_utils, 'extend_lvm_volume_group') | 215 | @patch.object(cinder_utils, 'extend_lvm_volume_group') |
146 | 215 | def test_configure_lvm_storage(self, extend_lvm, clean_storage): | 216 | def test_configure_lvm_storage(self, extend_lvm, reduce_lvm, clean_storage): |
147 | 216 | devices = ['/dev/vdb', '/dev/vdc'] | 217 | devices = ['/dev/vdb', '/dev/vdc'] |
148 | 217 | self.is_lvm_physical_volume.return_value = False | 218 | self.is_lvm_physical_volume.return_value = False |
150 | 218 | cinder_utils.configure_lvm_storage(devices, 'test', True) | 219 | cinder_utils.configure_lvm_storage(devices, 'test', True, True) |
151 | 219 | clean_storage.assert_has_calls( | 220 | clean_storage.assert_has_calls( |
152 | 220 | [call('/dev/vdb'), | 221 | [call('/dev/vdb'), |
153 | 221 | call('/dev/vdc')] | 222 | call('/dev/vdc')] |
154 | @@ -225,24 +226,29 @@ | |||
155 | 225 | call('/dev/vdc')] | 226 | call('/dev/vdc')] |
156 | 226 | ) | 227 | ) |
157 | 227 | self.create_lvm_volume_group.assert_called_with('test', '/dev/vdb') | 228 | self.create_lvm_volume_group.assert_called_with('test', '/dev/vdb') |
158 | 229 | reduce_lvm.assert_called_with('test') | ||
159 | 228 | extend_lvm.assert_called_with('test', '/dev/vdc') | 230 | extend_lvm.assert_called_with('test', '/dev/vdc') |
160 | 229 | 231 | ||
161 | 230 | @patch.object(cinder_utils, 'clean_storage') | 232 | @patch.object(cinder_utils, 'clean_storage') |
162 | 233 | @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing') | ||
163 | 231 | @patch.object(cinder_utils, 'extend_lvm_volume_group') | 234 | @patch.object(cinder_utils, 'extend_lvm_volume_group') |
165 | 232 | def test_configure_lvm_storage_loopback(self, extend_lvm, clean_storage): | 235 | def test_configure_lvm_storage_loopback(self, extend_lvm, reduce_lvm, |
166 | 236 | clean_storage): | ||
167 | 233 | devices = ['/mnt/loop0|10'] | 237 | devices = ['/mnt/loop0|10'] |
168 | 234 | self.ensure_loopback_device.return_value = '/dev/loop0' | 238 | self.ensure_loopback_device.return_value = '/dev/loop0' |
169 | 235 | self.is_lvm_physical_volume.return_value = False | 239 | self.is_lvm_physical_volume.return_value = False |
171 | 236 | cinder_utils.configure_lvm_storage(devices, 'test', True) | 240 | cinder_utils.configure_lvm_storage(devices, 'test', True, True) |
172 | 237 | clean_storage.assert_called_with('/dev/loop0') | 241 | clean_storage.assert_called_with('/dev/loop0') |
173 | 238 | self.ensure_loopback_device.assert_called_with('/mnt/loop0', '10') | 242 | self.ensure_loopback_device.assert_called_with('/mnt/loop0', '10') |
174 | 239 | self.create_lvm_physical_volume.assert_called_with('/dev/loop0') | 243 | self.create_lvm_physical_volume.assert_called_with('/dev/loop0') |
175 | 240 | self.create_lvm_volume_group.assert_called_with('test', '/dev/loop0') | 244 | self.create_lvm_volume_group.assert_called_with('test', '/dev/loop0') |
176 | 245 | reduce_lvm.assert_called_with('test') | ||
177 | 241 | self.assertFalse(extend_lvm.called) | 246 | self.assertFalse(extend_lvm.called) |
178 | 242 | 247 | ||
179 | 243 | @patch.object(cinder_utils, 'clean_storage') | 248 | @patch.object(cinder_utils, 'clean_storage') |
180 | 249 | @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing') | ||
181 | 244 | @patch.object(cinder_utils, 'extend_lvm_volume_group') | 250 | @patch.object(cinder_utils, 'extend_lvm_volume_group') |
183 | 245 | def test_configure_lvm_storage_existing_vg(self, extend_lvm, | 251 | def test_configure_lvm_storage_existing_vg(self, extend_lvm, reduce_lvm, |
184 | 246 | clean_storage): | 252 | clean_storage): |
185 | 247 | def pv_lookup(device): | 253 | def pv_lookup(device): |
186 | 248 | devices = { | 254 | devices = { |
187 | @@ -260,19 +266,21 @@ | |||
188 | 260 | devices = ['/dev/vdb', '/dev/vdc'] | 266 | devices = ['/dev/vdb', '/dev/vdc'] |
189 | 261 | self.is_lvm_physical_volume.side_effect = pv_lookup | 267 | self.is_lvm_physical_volume.side_effect = pv_lookup |
190 | 262 | self.list_lvm_volume_group.side_effect = vg_lookup | 268 | self.list_lvm_volume_group.side_effect = vg_lookup |
192 | 263 | cinder_utils.configure_lvm_storage(devices, 'test', True) | 269 | cinder_utils.configure_lvm_storage(devices, 'test', True, True) |
193 | 264 | clean_storage.assert_has_calls( | 270 | clean_storage.assert_has_calls( |
194 | 265 | [call('/dev/vdc')] | 271 | [call('/dev/vdc')] |
195 | 266 | ) | 272 | ) |
196 | 267 | self.create_lvm_physical_volume.assert_has_calls( | 273 | self.create_lvm_physical_volume.assert_has_calls( |
197 | 268 | [call('/dev/vdc')] | 274 | [call('/dev/vdc')] |
198 | 269 | ) | 275 | ) |
199 | 276 | reduce_lvm.assert_called_with('test') | ||
200 | 270 | extend_lvm.assert_called_with('test', '/dev/vdc') | 277 | extend_lvm.assert_called_with('test', '/dev/vdc') |
201 | 271 | self.assertFalse(self.create_lvm_volume_group.called) | 278 | self.assertFalse(self.create_lvm_volume_group.called) |
202 | 272 | 279 | ||
203 | 273 | @patch.object(cinder_utils, 'clean_storage') | 280 | @patch.object(cinder_utils, 'clean_storage') |
204 | 281 | @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing') | ||
205 | 274 | @patch.object(cinder_utils, 'extend_lvm_volume_group') | 282 | @patch.object(cinder_utils, 'extend_lvm_volume_group') |
207 | 275 | def test_configure_lvm_storage_different_vg(self, extend_lvm, | 283 | def test_configure_lvm_storage_different_vg(self, extend_lvm, reduce_lvm, |
208 | 276 | clean_storage): | 284 | clean_storage): |
209 | 277 | def pv_lookup(device): | 285 | def pv_lookup(device): |
210 | 278 | devices = { | 286 | devices = { |
211 | @@ -290,15 +298,18 @@ | |||
212 | 290 | devices = ['/dev/vdb', '/dev/vdc'] | 298 | devices = ['/dev/vdb', '/dev/vdc'] |
213 | 291 | self.is_lvm_physical_volume.side_effect = pv_lookup | 299 | self.is_lvm_physical_volume.side_effect = pv_lookup |
214 | 292 | self.list_lvm_volume_group.side_effect = vg_lookup | 300 | self.list_lvm_volume_group.side_effect = vg_lookup |
216 | 293 | cinder_utils.configure_lvm_storage(devices, 'test', True) | 301 | cinder_utils.configure_lvm_storage(devices, 'test', True, True) |
217 | 294 | clean_storage.assert_called_with('/dev/vdc') | 302 | clean_storage.assert_called_with('/dev/vdc') |
218 | 295 | self.create_lvm_physical_volume.assert_called_with('/dev/vdc') | 303 | self.create_lvm_physical_volume.assert_called_with('/dev/vdc') |
219 | 304 | reduce_lvm.assert_called_with('test') | ||
220 | 296 | extend_lvm.assert_called_with('test', '/dev/vdc') | 305 | extend_lvm.assert_called_with('test', '/dev/vdc') |
221 | 297 | self.assertFalse(self.create_lvm_volume_group.called) | 306 | self.assertFalse(self.create_lvm_volume_group.called) |
222 | 298 | 307 | ||
223 | 299 | @patch.object(cinder_utils, 'clean_storage') | 308 | @patch.object(cinder_utils, 'clean_storage') |
224 | 309 | @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing') | ||
225 | 300 | @patch.object(cinder_utils, 'extend_lvm_volume_group') | 310 | @patch.object(cinder_utils, 'extend_lvm_volume_group') |
226 | 301 | def test_configure_lvm_storage_different_vg_ignore(self, extend_lvm, | 311 | def test_configure_lvm_storage_different_vg_ignore(self, extend_lvm, |
227 | 312 | reduce_lvm, | ||
228 | 302 | clean_storage): | 313 | clean_storage): |
229 | 303 | def pv_lookup(device): | 314 | def pv_lookup(device): |
230 | 304 | devices = { | 315 | devices = { |
231 | @@ -316,13 +327,19 @@ | |||
232 | 316 | devices = ['/dev/vdb', '/dev/vdc'] | 327 | devices = ['/dev/vdb', '/dev/vdc'] |
233 | 317 | self.is_lvm_physical_volume.side_effect = pv_lookup | 328 | self.is_lvm_physical_volume.side_effect = pv_lookup |
234 | 318 | self.list_lvm_volume_group.side_effect = vg_lookup | 329 | self.list_lvm_volume_group.side_effect = vg_lookup |
236 | 319 | cinder_utils.configure_lvm_storage(devices, 'test', False) | 330 | cinder_utils.configure_lvm_storage(devices, 'test', False, False) |
237 | 320 | self.assertFalse(clean_storage.called) | 331 | self.assertFalse(clean_storage.called) |
238 | 321 | self.assertFalse(self.create_lvm_physical_volume.called) | 332 | self.assertFalse(self.create_lvm_physical_volume.called) |
239 | 333 | self.assertFalse(reduce_lvm.called) | ||
240 | 322 | self.assertFalse(extend_lvm.called) | 334 | self.assertFalse(extend_lvm.called) |
241 | 323 | self.assertFalse(self.create_lvm_volume_group.called) | 335 | self.assertFalse(self.create_lvm_volume_group.called) |
242 | 324 | 336 | ||
243 | 325 | @patch('subprocess.check_call') | 337 | @patch('subprocess.check_call') |
244 | 338 | def test_reduce_lvm_volume_group_missing(self, _call): | ||
245 | 339 | cinder_utils.reduce_lvm_volume_group_missing('test') | ||
246 | 340 | _call.assert_called_with(['vgreduce', '--removemissing', 'test']) | ||
247 | 341 | |||
248 | 342 | @patch('subprocess.check_call') | ||
249 | 326 | def test_extend_lvm_volume_group(self, _call): | 343 | def test_extend_lvm_volume_group(self, _call): |
250 | 327 | cinder_utils.extend_lvm_volume_group('test', '/dev/sdb') | 344 | cinder_utils.extend_lvm_volume_group('test', '/dev/sdb') |
251 | 328 | _call.assert_called_with(['vgextend', 'test', '/dev/sdb']) | 345 | _call.assert_called_with(['vgextend', 'test', '/dev/sdb']) |
Marking WIP - set back to 'Needs review' when ready for re-review.