Merge lp:~james-page/charm-helpers/ceph-redux into lp:charm-helpers
Status: Merged
Merged at revision: 77
Proposed branch: lp:~james-page/charm-helpers/ceph-redux
Merge into: lp:charm-helpers
Diff against target: 1104 lines (+721/-314), 4 files modified:
- charmhelpers/contrib/hahelpers/ceph.py (+0/-294)
- charmhelpers/contrib/storage/linux/ceph.py (+336/-0)
- tests/contrib/storage/test_linux_ceph.py (+351/-20)
- tests/helpers.py (+34/-0)
To merge this branch: bzr merge lp:~james-page/charm-helpers/ceph-redux
Related bugs: (none)
Reviewer: Adam Gandelman (community) - Approve
Review via email: mp+179948@code.launchpad.net
Commit message
Description of the change
Redux of the ceph helper: adds unit tests and relocates the module to a more sensible location (charmhelpers.contrib.storage.linux.ceph).
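For context, here is a minimal sketch (not part of this proposal) of how a charm hook might consume the relocated module. The relation keys 'key' and 'auth', the hook name, and the service/pool/image names are illustrative assumptions, not part of this branch:

    from charmhelpers.core.hookenv import relation_get

    import charmhelpers.contrib.storage.linux.ceph as ceph


    def ceph_changed():
        # Hypothetical ceph-relation-changed hook for a 'mysql' charm.
        ceph.install()  # installs ceph-common and ensures /etc/ceph exists
        key = relation_get('key')
        auth = relation_get('auth')
        if not key or not auth:
            return  # relation data not yet available
        ceph.configure(service='mysql', key=key, auth=auth)
        # Per its docstring, ensure_ceph_storage() must only be called from a
        # single unit for a given rbd_img, or data loss will occur.
        ceph.ensure_ceph_storage(service='mysql', pool='mysql',
                                 rbd_img='mysql', sizemb=1024,
                                 mount_point='/var/lib/mysql',
                                 blk_device='/dev/rbd1', fstype='ext4',
                                 system_services=['mysql'])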
- 56. By James Page: Rebase on trunk
- 57. By James Page: Tidy lint in test_linux_ceph
- 58. By James Page: Rebase on trunk, fixup test errors
James Page (james-page) wrote:
On 23/09/13 18:18, Adam Gandelman wrote:
> LGTM. At this point we should probably have all tests that use
> patch_open()/mock_open() using the same copy.
Agreed - I'll find some time to do a global refactor of that bit.
--
James Page
Ubuntu and Debian Developer
Adam Gandelman (gandelman-a) wrote:
On 09/23/2013 01:42 PM, James Page wrote:
> On 23/09/13 18:18, Adam Gandelman wrote:
> > LGTM. At this point we should probably have all tests that use
> > patch_open()/mock_open() using the same copy.
>
> Agreed - I'll find some time to do a global refactor of that bit.
>
Filed this earlier for at least patch_open/
https:/
Preview Diff
1 | === removed file 'charmhelpers/contrib/hahelpers/ceph.py' |
2 | --- charmhelpers/contrib/hahelpers/ceph.py 2013-08-21 09:10:58 +0000 |
3 | +++ charmhelpers/contrib/hahelpers/ceph.py 1970-01-01 00:00:00 +0000 |
4 | @@ -1,294 +0,0 @@ |
5 | -# |
6 | -# Copyright 2012 Canonical Ltd. |
7 | -# |
8 | -# This file is sourced from lp:openstack-charm-helpers |
9 | -# |
10 | -# Authors: |
11 | -# James Page <james.page@ubuntu.com> |
12 | -# Adam Gandelman <adamg@ubuntu.com> |
13 | -# |
14 | - |
15 | -import commands |
16 | -import os |
17 | -import shutil |
18 | -import time |
19 | - |
20 | -from subprocess import ( |
21 | - check_call, |
22 | - check_output, |
23 | - CalledProcessError |
24 | -) |
25 | - |
26 | -from charmhelpers.core.hookenv import ( |
27 | - relation_get, |
28 | - relation_ids, |
29 | - related_units, |
30 | - log, |
31 | - INFO, |
32 | - ERROR |
33 | -) |
34 | - |
35 | -from charmhelpers.fetch import ( |
36 | - apt_install, |
37 | -) |
38 | - |
39 | -from charmhelpers.core.host import ( |
40 | - mount, |
41 | - mounts, |
42 | - service_start, |
43 | - service_stop, |
44 | - umount, |
45 | -) |
46 | - |
47 | -KEYRING = '/etc/ceph/ceph.client.%s.keyring' |
48 | -KEYFILE = '/etc/ceph/ceph.client.%s.key' |
49 | - |
50 | -CEPH_CONF = """[global] |
51 | - auth supported = %(auth)s |
52 | - keyring = %(keyring)s |
53 | - mon host = %(mon_hosts)s |
54 | -""" |
55 | - |
56 | - |
57 | -def running(service): |
58 | - # this local util can be dropped as soon the following branch lands |
59 | - # in lp:charm-helpers |
60 | - # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/ |
61 | - try: |
62 | - output = check_output(['service', service, 'status']) |
63 | - except CalledProcessError: |
64 | - return False |
65 | - else: |
66 | - if ("start/running" in output or "is running" in output): |
67 | - return True |
68 | - else: |
69 | - return False |
70 | - |
71 | - |
72 | -def install(): |
73 | - ceph_dir = "/etc/ceph" |
74 | - if not os.path.isdir(ceph_dir): |
75 | - os.mkdir(ceph_dir) |
76 | - apt_install('ceph-common', fatal=True) |
77 | - |
78 | - |
79 | -def rbd_exists(service, pool, rbd_img): |
80 | - (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' % |
81 | - (service, pool)) |
82 | - return rbd_img in out |
83 | - |
84 | - |
85 | -def create_rbd_image(service, pool, image, sizemb): |
86 | - cmd = [ |
87 | - 'rbd', |
88 | - 'create', |
89 | - image, |
90 | - '--size', |
91 | - str(sizemb), |
92 | - '--id', |
93 | - service, |
94 | - '--pool', |
95 | - pool |
96 | - ] |
97 | - check_call(cmd) |
98 | - |
99 | - |
100 | -def pool_exists(service, name): |
101 | - (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) |
102 | - return name in out |
103 | - |
104 | - |
105 | -def create_pool(service, name): |
106 | - cmd = [ |
107 | - 'rados', |
108 | - '--id', |
109 | - service, |
110 | - 'mkpool', |
111 | - name |
112 | - ] |
113 | - check_call(cmd) |
114 | - |
115 | - |
116 | -def keyfile_path(service): |
117 | - return KEYFILE % service |
118 | - |
119 | - |
120 | -def keyring_path(service): |
121 | - return KEYRING % service |
122 | - |
123 | - |
124 | -def create_keyring(service, key): |
125 | - keyring = keyring_path(service) |
126 | - if os.path.exists(keyring): |
127 | - log('ceph: Keyring exists at %s.' % keyring, level=INFO) |
128 | - cmd = [ |
129 | - 'ceph-authtool', |
130 | - keyring, |
131 | - '--create-keyring', |
132 | - '--name=client.%s' % service, |
133 | - '--add-key=%s' % key |
134 | - ] |
135 | - check_call(cmd) |
136 | - log('ceph: Created new ring at %s.' % keyring, level=INFO) |
137 | - |
138 | - |
139 | -def create_key_file(service, key): |
140 | - # create a file containing the key |
141 | - keyfile = keyfile_path(service) |
142 | - if os.path.exists(keyfile): |
143 | - log('ceph: Keyfile exists at %s.' % keyfile, level=INFO) |
144 | - fd = open(keyfile, 'w') |
145 | - fd.write(key) |
146 | - fd.close() |
147 | - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) |
148 | - |
149 | - |
150 | -def get_ceph_nodes(): |
151 | - hosts = [] |
152 | - for r_id in relation_ids('ceph'): |
153 | - for unit in related_units(r_id): |
154 | - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
155 | - return hosts |
156 | - |
157 | - |
158 | -def configure(service, key, auth): |
159 | - create_keyring(service, key) |
160 | - create_key_file(service, key) |
161 | - hosts = get_ceph_nodes() |
162 | - mon_hosts = ",".join(map(str, hosts)) |
163 | - keyring = keyring_path(service) |
164 | - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: |
165 | - ceph_conf.write(CEPH_CONF % locals()) |
166 | - modprobe_kernel_module('rbd') |
167 | - |
168 | - |
169 | -def image_mapped(image_name): |
170 | - (rc, out) = commands.getstatusoutput('rbd showmapped') |
171 | - return image_name in out |
172 | - |
173 | - |
174 | -def map_block_storage(service, pool, image): |
175 | - cmd = [ |
176 | - 'rbd', |
177 | - 'map', |
178 | - '%s/%s' % (pool, image), |
179 | - '--user', |
180 | - service, |
181 | - '--secret', |
182 | - keyfile_path(service), |
183 | - ] |
184 | - check_call(cmd) |
185 | - |
186 | - |
187 | -def filesystem_mounted(fs): |
188 | - return fs in [f for m, f in mounts()] |
189 | - |
190 | - |
191 | -def make_filesystem(blk_device, fstype='ext4', timeout=10): |
192 | - count = 0 |
193 | - e_noent = os.errno.ENOENT |
194 | - while not os.path.exists(blk_device): |
195 | - if count >= timeout: |
196 | - log('ceph: gave up waiting on block device %s' % blk_device, |
197 | - level=ERROR) |
198 | - raise IOError(e_noent, os.strerror(e_noent), blk_device) |
199 | - log('ceph: waiting for block device %s to appear' % blk_device, |
200 | - level=INFO) |
201 | - count += 1 |
202 | - time.sleep(1) |
203 | - else: |
204 | - log('ceph: Formatting block device %s as filesystem %s.' % |
205 | - (blk_device, fstype), level=INFO) |
206 | - check_call(['mkfs', '-t', fstype, blk_device]) |
207 | - |
208 | - |
209 | -def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): |
210 | - # mount block device into /mnt |
211 | - mount(blk_device, '/mnt') |
212 | - |
213 | - # copy data to /mnt |
214 | - try: |
215 | - copy_files(data_src_dst, '/mnt') |
216 | - except: |
217 | - pass |
218 | - |
219 | - # umount block device |
220 | - umount('/mnt') |
221 | - |
222 | - _dir = os.stat(data_src_dst) |
223 | - uid = _dir.st_uid |
224 | - gid = _dir.st_gid |
225 | - |
226 | - # re-mount where the data should originally be |
227 | - mount(blk_device, data_src_dst, persist=True) |
228 | - |
229 | - # ensure original ownership of new mount. |
230 | - cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] |
231 | - check_call(cmd) |
232 | - |
233 | - |
234 | -# TODO: re-use |
235 | -def modprobe_kernel_module(module): |
236 | - log('ceph: Loading kernel module', level=INFO) |
237 | - cmd = ['modprobe', module] |
238 | - check_call(cmd) |
239 | - cmd = 'echo %s >> /etc/modules' % module |
240 | - check_call(cmd, shell=True) |
241 | - |
242 | - |
243 | -def copy_files(src, dst, symlinks=False, ignore=None): |
244 | - for item in os.listdir(src): |
245 | - s = os.path.join(src, item) |
246 | - d = os.path.join(dst, item) |
247 | - if os.path.isdir(s): |
248 | - shutil.copytree(s, d, symlinks, ignore) |
249 | - else: |
250 | - shutil.copy2(s, d) |
251 | - |
252 | - |
253 | -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, |
254 | - blk_device, fstype, system_services=[]): |
255 | - """ |
256 | - To be called from the current cluster leader. |
257 | - Ensures given pool and RBD image exists, is mapped to a block device, |
258 | - and the device is formatted and mounted at the given mount_point. |
259 | - |
260 | - If formatting a device for the first time, data existing at mount_point |
261 | - will be migrated to the RBD device before being remounted. |
262 | - |
263 | - All services listed in system_services will be stopped prior to data |
264 | - migration and restarted when complete. |
265 | - """ |
266 | - # Ensure pool, RBD image, RBD mappings are in place. |
267 | - if not pool_exists(service, pool): |
268 | - log('ceph: Creating new pool %s.' % pool, level=INFO) |
269 | - create_pool(service, pool) |
270 | - |
271 | - if not rbd_exists(service, pool, rbd_img): |
272 | - log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO) |
273 | - create_rbd_image(service, pool, rbd_img, sizemb) |
274 | - |
275 | - if not image_mapped(rbd_img): |
276 | - log('ceph: Mapping RBD Image as a Block Device.', level=INFO) |
277 | - map_block_storage(service, pool, rbd_img) |
278 | - |
279 | - # make file system |
280 | - # TODO: What happens if for whatever reason this is run again and |
281 | - # the data is already in the rbd device and/or is mounted?? |
282 | - # When it is mounted already, it will fail to make the fs |
283 | - # XXX: This is really sketchy! Need to at least add an fstab entry |
284 | - # otherwise this hook will blow away existing data if its executed |
285 | - # after a reboot. |
286 | - if not filesystem_mounted(mount_point): |
287 | - make_filesystem(blk_device, fstype) |
288 | - |
289 | - for svc in system_services: |
290 | - if running(svc): |
291 | - log('Stopping services %s prior to migrating data.' % svc, |
292 | - level=INFO) |
293 | - service_stop(svc) |
294 | - |
295 | - place_data_on_ceph(service, blk_device, mount_point, fstype) |
296 | - |
297 | - for svc in system_services: |
298 | - service_start(svc) |
299 | |
300 | === added file 'charmhelpers/contrib/storage/linux/ceph.py' |
301 | --- charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000 |
302 | +++ charmhelpers/contrib/storage/linux/ceph.py 2013-09-23 10:51:06 +0000 |
303 | @@ -0,0 +1,336 @@ |
304 | +# |
305 | +# Copyright 2012 Canonical Ltd. |
306 | +# |
307 | +# This file is sourced from lp:openstack-charm-helpers |
308 | +# |
309 | +# Authors: |
310 | +# James Page <james.page@ubuntu.com> |
311 | +# Adam Gandelman <adamg@ubuntu.com> |
312 | +# |
313 | + |
314 | +import os |
315 | +import shutil |
316 | +import json |
317 | +import time |
318 | + |
319 | +from subprocess import ( |
320 | + check_call, |
321 | + check_output, |
322 | + CalledProcessError |
323 | +) |
324 | + |
325 | +from charmhelpers.core.hookenv import ( |
326 | + relation_get, |
327 | + relation_ids, |
328 | + related_units, |
329 | + log, |
330 | + INFO, |
331 | + WARNING, |
332 | + ERROR |
333 | +) |
334 | + |
335 | +from charmhelpers.core.host import ( |
336 | + mount, |
337 | + mounts, |
338 | + service_start, |
339 | + service_stop, |
340 | + service_running, |
341 | + umount, |
342 | +) |
343 | + |
344 | +from charmhelpers.fetch import ( |
345 | + apt_install, |
346 | +) |
347 | + |
348 | +KEYRING = '/etc/ceph/ceph.client.{}.keyring' |
349 | +KEYFILE = '/etc/ceph/ceph.client.{}.key' |
350 | + |
351 | +CEPH_CONF = """[global] |
352 | + auth supported = {auth} |
353 | + keyring = {keyring} |
354 | + mon host = {mon_hosts} |
355 | +""" |
356 | + |
357 | + |
358 | +def install(): |
359 | + ''' Basic Ceph client installation ''' |
360 | + ceph_dir = "/etc/ceph" |
361 | + if not os.path.exists(ceph_dir): |
362 | + os.mkdir(ceph_dir) |
363 | + apt_install('ceph-common', fatal=True) |
364 | + |
365 | + |
366 | +def rbd_exists(service, pool, rbd_img): |
367 | + ''' Check to see if a RADOS block device exists ''' |
368 | + try: |
369 | + out = check_output(['rbd', 'list', '--id', service, |
370 | + '--pool', pool]) |
371 | + except CalledProcessError: |
372 | + return False |
373 | + else: |
374 | + return rbd_img in out |
375 | + |
376 | + |
377 | +def create_rbd_image(service, pool, image, sizemb): |
378 | + ''' Create a new RADOS block device ''' |
379 | + cmd = [ |
380 | + 'rbd', |
381 | + 'create', |
382 | + image, |
383 | + '--size', |
384 | + str(sizemb), |
385 | + '--id', |
386 | + service, |
387 | + '--pool', |
388 | + pool |
389 | + ] |
390 | + check_call(cmd) |
391 | + |
392 | + |
393 | +def pool_exists(service, name): |
394 | + ''' Check to see if a RADOS pool already exists ''' |
395 | + try: |
396 | + out = check_output(['rados', '--id', service, 'lspools']) |
397 | + except CalledProcessError: |
398 | + return False |
399 | + else: |
400 | + return name in out |
401 | + |
402 | + |
403 | +def get_osds(): |
404 | + ''' |
405 | + Return a list of all Ceph Object Storage Daemons |
406 | + currently in the cluster |
407 | + ''' |
408 | + return json.loads(check_output(['ceph', 'osd', 'ls', '--format=json'])) |
409 | + |
410 | + |
411 | +def create_pool(service, name, replicas=2): |
412 | + ''' Create a new RADOS pool ''' |
413 | + if pool_exists(service, name): |
414 | + log("Ceph pool {} already exists, skipping creation".format(name), |
415 | + level=WARNING) |
416 | + return |
417 | + # Calculate the number of placement groups based |
418 | + # on upstream recommended best practices. |
419 | + pgnum = (len(get_osds()) * 100 / replicas) |
420 | + cmd = [ |
421 | + 'ceph', '--id', service, |
422 | + 'osd', 'pool', 'create', |
423 | + name, pgnum |
424 | + ] |
425 | + check_call(cmd) |
426 | + cmd = [ |
427 | + 'ceph', '--id', service, |
428 | + 'osd', 'set', name, |
429 | + 'size', replicas |
430 | + ] |
431 | + check_call(cmd) |
432 | + |
433 | + |
434 | +def delete_pool(service, name): |
435 | + ''' Delete a RADOS pool from ceph ''' |
436 | + cmd = [ |
437 | + 'ceph', '--id', service, |
438 | + 'osd', 'pool', 'delete', |
439 | + name, '--yes-i-really-really-mean-it' |
440 | + ] |
441 | + check_call(cmd) |
442 | + |
443 | + |
444 | +def _keyfile_path(service): |
445 | + return KEYFILE.format(service) |
446 | + |
447 | + |
448 | +def _keyring_path(service): |
449 | + return KEYRING.format(service) |
450 | + |
451 | + |
452 | +def create_keyring(service, key): |
453 | + ''' Create a new Ceph keyring containing key''' |
454 | + keyring = _keyring_path(service) |
455 | + if os.path.exists(keyring): |
456 | + log('ceph: Keyring exists at %s.' % keyring, level=WARNING) |
457 | + return |
458 | + cmd = [ |
459 | + 'ceph-authtool', |
460 | + keyring, |
461 | + '--create-keyring', |
462 | + '--name=client.{}'.format(service), |
463 | + '--add-key={}'.format(key) |
464 | + ] |
465 | + check_call(cmd) |
466 | + log('ceph: Created new ring at %s.' % keyring, level=INFO) |
467 | + |
468 | + |
469 | +def create_key_file(service, key): |
470 | + ''' Create a file containing key ''' |
471 | + keyfile = _keyfile_path(service) |
472 | + if os.path.exists(keyfile): |
473 | + log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) |
474 | + return |
475 | + with open(keyfile, 'w') as fd: |
476 | + fd.write(key) |
477 | + log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) |
478 | + |
479 | + |
480 | +def get_ceph_nodes(): |
481 | + ''' Query named relation 'ceph' to determine current nodes ''' |
482 | + hosts = [] |
483 | + for r_id in relation_ids('ceph'): |
484 | + for unit in related_units(r_id): |
485 | + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
486 | + return hosts |
487 | + |
488 | + |
489 | +def configure(service, key, auth): |
490 | + ''' Perform basic configuration of Ceph ''' |
491 | + create_keyring(service, key) |
492 | + create_key_file(service, key) |
493 | + hosts = get_ceph_nodes() |
494 | + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: |
495 | + ceph_conf.write(CEPH_CONF.format(auth=auth, |
496 | + keyring=_keyring_path(service), |
497 | + mon_hosts=",".join(map(str, hosts)))) |
498 | + modprobe('rbd') |
499 | + |
500 | + |
501 | +def image_mapped(name): |
502 | + ''' Determine whether a RADOS block device is mapped locally ''' |
503 | + try: |
504 | + out = check_output(['rbd', 'showmapped']) |
505 | + except CalledProcessError: |
506 | + return False |
507 | + else: |
508 | + return name in out |
509 | + |
510 | + |
511 | +def map_block_storage(service, pool, image): |
512 | + ''' Map a RADOS block device for local use ''' |
513 | + cmd = [ |
514 | + 'rbd', |
515 | + 'map', |
516 | + '{}/{}'.format(pool, image), |
517 | + '--user', |
518 | + service, |
519 | + '--secret', |
520 | + _keyfile_path(service), |
521 | + ] |
522 | + check_call(cmd) |
523 | + |
524 | + |
525 | +def filesystem_mounted(fs): |
526 | + ''' Determine whether a filesystem is already mounted ''' |
527 | + return fs in [f for f, m in mounts()] |
528 | + |
529 | + |
530 | +def make_filesystem(blk_device, fstype='ext4', timeout=10): |
531 | + ''' Make a new filesystem on the specified block device ''' |
532 | + count = 0 |
533 | + e_noent = os.errno.ENOENT |
534 | + while not os.path.exists(blk_device): |
535 | + if count >= timeout: |
536 | + log('ceph: gave up waiting on block device %s' % blk_device, |
537 | + level=ERROR) |
538 | + raise IOError(e_noent, os.strerror(e_noent), blk_device) |
539 | + log('ceph: waiting for block device %s to appear' % blk_device, |
540 | + level=INFO) |
541 | + count += 1 |
542 | + time.sleep(1) |
543 | + else: |
544 | + log('ceph: Formatting block device %s as filesystem %s.' % |
545 | + (blk_device, fstype), level=INFO) |
546 | + check_call(['mkfs', '-t', fstype, blk_device]) |
547 | + |
548 | + |
549 | +def place_data_on_block_device(blk_device, data_src_dst): |
550 | + ''' Migrate data in data_src_dst to blk_device and then remount ''' |
551 | + # mount block device into /mnt |
552 | + mount(blk_device, '/mnt') |
553 | + # copy data to /mnt |
554 | + copy_files(data_src_dst, '/mnt') |
555 | + # umount block device |
556 | + umount('/mnt') |
557 | + # Grab user/group ID's from original source |
558 | + _dir = os.stat(data_src_dst) |
559 | + uid = _dir.st_uid |
560 | + gid = _dir.st_gid |
561 | + # re-mount where the data should originally be |
562 | + # TODO: persist is currently a NO-OP in core.host |
563 | + mount(blk_device, data_src_dst, persist=True) |
564 | + # ensure original ownership of new mount. |
565 | + os.chown(data_src_dst, uid, gid) |
566 | + |
567 | + |
568 | +# TODO: re-use |
569 | +def modprobe(module): |
570 | + ''' Load a kernel module and configure for auto-load on reboot ''' |
571 | + log('ceph: Loading kernel module', level=INFO) |
572 | + cmd = ['modprobe', module] |
573 | + check_call(cmd) |
574 | + with open('/etc/modules', 'r+') as modules: |
575 | + if module not in modules.read(): |
576 | + modules.write(module) |
577 | + |
578 | + |
579 | +def copy_files(src, dst, symlinks=False, ignore=None): |
580 | + ''' Copy files from src to dst ''' |
581 | + for item in os.listdir(src): |
582 | + s = os.path.join(src, item) |
583 | + d = os.path.join(dst, item) |
584 | + if os.path.isdir(s): |
585 | + shutil.copytree(s, d, symlinks, ignore) |
586 | + else: |
587 | + shutil.copy2(s, d) |
588 | + |
589 | + |
590 | +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, |
591 | + blk_device, fstype, system_services=[]): |
592 | + """ |
593 | + NOTE: This function must only be called from a single service unit for |
594 | + the same rbd_img otherwise data loss will occur. |
595 | + |
596 | + Ensures given pool and RBD image exists, is mapped to a block device, |
597 | + and the device is formatted and mounted at the given mount_point. |
598 | + |
599 | + If formatting a device for the first time, data existing at mount_point |
600 | + will be migrated to the RBD device before being re-mounted. |
601 | + |
602 | + All services listed in system_services will be stopped prior to data |
603 | + migration and restarted when complete. |
604 | + """ |
605 | + # Ensure pool, RBD image, RBD mappings are in place. |
606 | + if not pool_exists(service, pool): |
607 | + log('ceph: Creating new pool {}.'.format(pool)) |
608 | + create_pool(service, pool) |
609 | + |
610 | + if not rbd_exists(service, pool, rbd_img): |
611 | + log('ceph: Creating RBD image ({}).'.format(rbd_img)) |
612 | + create_rbd_image(service, pool, rbd_img, sizemb) |
613 | + |
614 | + if not image_mapped(rbd_img): |
615 | + log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) |
616 | + map_block_storage(service, pool, rbd_img) |
617 | + |
618 | + # make file system |
619 | + # TODO: What happens if for whatever reason this is run again and |
620 | + # the data is already in the rbd device and/or is mounted?? |
621 | + # When it is mounted already, it will fail to make the fs |
622 | + # XXX: This is really sketchy! Need to at least add an fstab entry |
623 | + # otherwise this hook will blow away existing data if its executed |
624 | + # after a reboot. |
625 | + if not filesystem_mounted(mount_point): |
626 | + make_filesystem(blk_device, fstype) |
627 | + |
628 | + for svc in system_services: |
629 | + if service_running(svc): |
630 | + log('ceph: Stopping services {} prior to migrating data.' |
631 | + .format(svc)) |
632 | + service_stop(svc) |
633 | + |
634 | + place_data_on_block_device(blk_device, mount_point) |
635 | + |
636 | + for svc in system_services: |
637 | + log('ceph: Starting service {} after migrating data.' |
638 | + .format(svc)) |
639 | + service_start(svc) |
640 | |
641 | === renamed file 'tests/contrib/hahelpers/test_ceph_utils.py' => 'tests/contrib/storage/test_linux_ceph.py' |
642 | --- tests/contrib/hahelpers/test_ceph_utils.py 2013-08-13 01:12:03 +0000 |
643 | +++ tests/contrib/storage/test_linux_ceph.py 2013-09-23 10:51:06 +0000 |
644 | @@ -1,12 +1,15 @@ |
645 | -from mock import patch |
646 | +from mock import patch, call |
647 | |
648 | from shutil import rmtree |
649 | from tempfile import mkdtemp |
650 | from threading import Timer |
651 | from testtools import TestCase |
652 | +import json |
653 | |
654 | +import charmhelpers.contrib.storage.linux.ceph as ceph_utils |
655 | +from subprocess import CalledProcessError |
656 | +from tests.helpers import patch_open |
657 | import nose.plugins.attrib |
658 | -import charmhelpers.contrib.hahelpers.ceph as ceph_utils |
659 | import os |
660 | import time |
661 | |
662 | @@ -17,12 +20,24 @@ |
663 | rbd |
664 | """ |
665 | |
666 | +LS_RBDS = """ |
667 | +rbd1 |
668 | +rbd2 |
669 | +rbd3 |
670 | +""" |
671 | + |
672 | +IMG_MAP = """ |
673 | +bar |
674 | +baz |
675 | +""" |
676 | + |
677 | |
678 | class CephUtilsTests(TestCase): |
679 | def setUp(self): |
680 | super(CephUtilsTests, self).setUp() |
681 | [self._patch(m) for m in [ |
682 | 'check_call', |
683 | + 'check_output', |
684 | 'log', |
685 | ]] |
686 | |
687 | @@ -32,37 +47,352 @@ |
688 | self.addCleanup(_m.stop) |
689 | setattr(self, method, mock) |
690 | |
691 | - def test_create_keyring(self): |
692 | + @patch('os.path.exists') |
693 | + def test_create_keyring(self, _exists): |
694 | '''It creates a new ceph keyring''' |
695 | + _exists.return_value = False |
696 | ceph_utils.create_keyring('cinder', 'cephkey') |
697 | _cmd = ['ceph-authtool', '/etc/ceph/ceph.client.cinder.keyring', |
698 | '--create-keyring', '--name=client.cinder', |
699 | '--add-key=cephkey'] |
700 | self.check_call.assert_called_with(_cmd) |
701 | |
702 | - def test_create_pool(self): |
703 | - '''It creates rados pool correctly''' |
704 | - ceph_utils.create_pool(service='cinder', name='foo') |
705 | - self.check_call.assert_called_with( |
706 | - ['rados', '--id', 'cinder', 'mkpool', 'foo'] |
707 | - ) |
708 | + @patch('os.path.exists') |
709 | + def test_create_keyring_already_exists(self, _exists): |
710 | + '''It creates a new ceph keyring''' |
711 | + _exists.return_value = True |
712 | + ceph_utils.create_keyring('cinder', 'cephkey') |
713 | + self.log.assert_called() |
714 | + self.check_call.assert_not_called() |
715 | + |
716 | + @patch('os.path.exists') |
717 | + def test_create_keyfile(self, _exists): |
718 | + '''It creates a new ceph keyfile''' |
719 | + _exists.return_value = False |
720 | + with patch_open() as (_open, _file): |
721 | + ceph_utils.create_key_file('cinder', 'cephkey') |
722 | + _file.write.assert_called_with('cephkey') |
723 | + self.log.assert_called() |
724 | + |
725 | + @patch('os.path.exists') |
726 | + def test_create_key_file_already_exists(self, _exists): |
727 | + '''It creates a new ceph keyring''' |
728 | + _exists.return_value = True |
729 | + ceph_utils.create_key_file('cinder', 'cephkey') |
730 | + self.log.assert_called() |
731 | + |
732 | + @patch('os.mkdir') |
733 | + @patch.object(ceph_utils, 'apt_install') |
734 | + @patch('os.path.exists') |
735 | + def test_install(self, _exists, _install, _mkdir): |
736 | + _exists.return_value = False |
737 | + ceph_utils.install() |
738 | + _mkdir.assert_called_with('/etc/ceph') |
739 | + _install.assert_called_with('ceph-common', fatal=True) |
740 | + |
741 | + def test_get_osds(self): |
742 | + self.check_output.return_value = json.dumps([1, 2, 3]) |
743 | + self.assertEquals(ceph_utils.get_osds(), [1, 2, 3]) |
744 | + |
745 | + def test_get_osds_none(self): |
746 | + self.check_output.return_value = json.dumps(None) |
747 | + self.assertEquals(ceph_utils.get_osds(), None) |
748 | + |
749 | + @patch.object(ceph_utils, 'get_osds') |
750 | + @patch.object(ceph_utils, 'pool_exists') |
751 | + def test_create_pool(self, _exists, _get_osds): |
752 | + '''It creates rados pool correctly with default replicas ''' |
753 | + _exists.return_value = False |
754 | + _get_osds.return_value = [1, 2, 3] |
755 | + ceph_utils.create_pool(service='cinder', name='foo') |
756 | + self.check_call.assert_has_calls([ |
757 | + call(['ceph', '--id', 'cinder', 'osd', 'pool', |
758 | + 'create', 'foo', 150]), |
759 | + call(['ceph', '--id', 'cinder', 'osd', 'set', |
760 | + 'foo', 'size', 2]) |
761 | + ]) |
762 | + |
763 | + @patch.object(ceph_utils, 'get_osds') |
764 | + @patch.object(ceph_utils, 'pool_exists') |
765 | + def test_create_pool_3_replicas(self, _exists, _get_osds): |
766 | + '''It creates rados pool correctly with 3 replicas''' |
767 | + _exists.return_value = False |
768 | + _get_osds.return_value = [1, 2, 3] |
769 | + ceph_utils.create_pool(service='cinder', name='foo', replicas=3) |
770 | + self.check_call.assert_has_calls([ |
771 | + call(['ceph', '--id', 'cinder', 'osd', 'pool', |
772 | + 'create', 'foo', 100]), |
773 | + call(['ceph', '--id', 'cinder', 'osd', 'set', |
774 | + 'foo', 'size', 3]) |
775 | + ]) |
776 | + |
777 | + def test_create_pool_already_exists(self): |
778 | + self._patch('pool_exists') |
779 | + self.pool_exists.return_value = True |
780 | + ceph_utils.create_pool(service='cinder', name='foo') |
781 | + self.log.assert_called() |
782 | + self.check_call.assert_not_called() |
783 | |
784 | def test_keyring_path(self): |
785 | '''It correctly dervies keyring path from service name''' |
786 | - result = ceph_utils.keyring_path('cinder') |
787 | + result = ceph_utils._keyring_path('cinder') |
788 | self.assertEquals('/etc/ceph/ceph.client.cinder.keyring', result) |
789 | |
790 | - @patch('commands.getstatusoutput') |
791 | - def test_pool_exists(self, get_output): |
792 | + def test_keyfile_path(self): |
793 | + '''It correctly derives keyfile path from service name''' |
794 | + result = ceph_utils._keyfile_path('cinder') |
795 | + self.assertEquals('/etc/ceph/ceph.client.cinder.key', result) |
796 | + |
797 | + def test_pool_exists(self): |
798 | '''It detects an rbd pool exists''' |
799 | - get_output.return_value = (0, LS_POOLS) |
800 | + self.check_output.return_value = LS_POOLS |
801 | self.assertTrue(ceph_utils.pool_exists('cinder', 'volumes')) |
802 | |
803 | - @patch('commands.getstatusoutput') |
804 | - def test_pool_does_not_exist(self, get_output): |
805 | + def test_pool_does_not_exist(self): |
806 | '''It detects an rbd pool exists''' |
807 | - get_output.return_value = (0, LS_POOLS) |
808 | - self.assertFalse(ceph_utils.pool_exists('cinder', 'foo')) |
809 | + self.check_output.return_value = LS_POOLS |
810 | + self.assertFalse(ceph_utils.pool_exists('cinder', 'foo')) |
811 | + |
812 | + def test_pool_exists_error(self): |
813 | + ''' Ensure subprocess errors are sandboxed with False ''' |
814 | + self.check_output.side_effect = CalledProcessError(1, 'rados') |
815 | + self.assertFalse(ceph_utils.pool_exists('cinder', 'foo')) |
816 | + |
817 | + def test_rbd_exists(self): |
818 | + self.check_output.return_value = LS_RBDS |
819 | + self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1')) |
820 | + self.check_output.assert_call_with( |
821 | + ['rbd', 'list', '--id', 'service', '--pool', 'pool'] |
822 | + ) |
823 | + |
824 | + def test_rbd_does_not_exist(self): |
825 | + self.check_output.return_value = LS_RBDS |
826 | + self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4')) |
827 | + self.check_output.assert_call_with( |
828 | + ['rbd', 'list', '--id', 'service', '--pool', 'pool'] |
829 | + ) |
830 | + |
831 | + def test_rbd_exists_error(self): |
832 | + ''' Ensure subprocess errors are sandboxed with False ''' |
833 | + self.check_output.side_effect = CalledProcessError(1, 'rbd') |
834 | + self.assertFalse(ceph_utils.rbd_exists('cinder', 'foo', 'rbd')) |
835 | + |
836 | + def test_create_rbd_image(self): |
837 | + ceph_utils.create_rbd_image('service', 'pool', 'image', 128) |
838 | + _cmd = ['rbd', 'create', 'image', |
839 | + '--size', '128', |
840 | + '--id', 'service', |
841 | + '--pool', 'pool'] |
842 | + self.check_call.assert_called_with(_cmd) |
843 | + |
844 | + def test_delete_pool(self): |
845 | + ceph_utils.delete_pool('cinder', 'pool') |
846 | + _cmd = [ |
847 | + 'ceph', '--id', 'cinder', |
848 | + 'osd', 'pool', 'delete', |
849 | + 'pool', '--yes-i-really-really-mean-it' |
850 | + ] |
851 | + self.check_call.assert_called_with(_cmd) |
852 | + |
853 | + def test_get_ceph_nodes(self): |
854 | + self._patch('relation_ids') |
855 | + self._patch('related_units') |
856 | + self._patch('relation_get') |
857 | + units = ['ceph/1', 'ceph2', 'ceph/3'] |
858 | + self.relation_ids.return_value = ['ceph:0'] |
859 | + self.related_units.return_value = units |
860 | + self.relation_get.return_value = '192.168.1.1' |
861 | + self.assertEquals(len(ceph_utils.get_ceph_nodes()), 3) |
862 | + |
863 | + def test_get_ceph_nodes_not_related(self): |
864 | + self._patch('relation_ids') |
865 | + self.relation_ids.return_value = [] |
866 | + self.assertEquals(ceph_utils.get_ceph_nodes(), []) |
867 | + |
868 | + def test_configure(self): |
869 | + self._patch('create_keyring') |
870 | + self._patch('create_key_file') |
871 | + self._patch('get_ceph_nodes') |
872 | + self._patch('modprobe') |
873 | + _hosts = ['192.168.1.1', '192.168.1.2'] |
874 | + self.get_ceph_nodes.return_value = _hosts |
875 | + _conf = ceph_utils.CEPH_CONF.format( |
876 | + auth='cephx', |
877 | + keyring=ceph_utils._keyring_path('cinder'), |
878 | + mon_hosts=",".join(map(str, _hosts)) |
879 | + ) |
880 | + with patch_open() as (_open, _file): |
881 | + ceph_utils.configure('cinder', 'key', 'cephx') |
882 | + _file.write.assert_called_with(_conf) |
883 | + _open.assert_called_with('/etc/ceph/ceph.conf', 'w') |
884 | + self.modprobe.assert_called_with('rbd') |
885 | + self.create_keyring.assert_called_with('cinder', 'key') |
886 | + self.create_key_file.assert_called_with('cinder', 'key') |
887 | + |
888 | + def test_image_mapped(self): |
889 | + self.check_output.return_value = IMG_MAP |
890 | + self.assertTrue(ceph_utils.image_mapped('bar')) |
891 | + |
892 | + def test_image_not_mapped(self): |
893 | + self.check_output.return_value = IMG_MAP |
894 | + self.assertFalse(ceph_utils.image_mapped('foo')) |
895 | + |
896 | + def test_image_not_mapped_error(self): |
897 | + self.check_output.side_effect = CalledProcessError(1, 'rbd') |
898 | + self.assertFalse(ceph_utils.image_mapped('bar')) |
899 | + |
900 | + def test_map_block_storage(self): |
901 | + _service = 'cinder' |
902 | + _pool = 'bar' |
903 | + _img = 'foo' |
904 | + _cmd = [ |
905 | + 'rbd', |
906 | + 'map', |
907 | + '{}/{}'.format(_pool, _img), |
908 | + '--user', |
909 | + _service, |
910 | + '--secret', |
911 | + ceph_utils._keyfile_path(_service), |
912 | + ] |
913 | + ceph_utils.map_block_storage(_service, _pool, _img) |
914 | + self.check_call.assert_called_with(_cmd) |
915 | + |
916 | + def test_modprobe(self): |
917 | + with patch_open() as (_open, _file): |
918 | + _file.read.return_value = 'anothermod\n' |
919 | + ceph_utils.modprobe('mymod') |
920 | + _open.assert_called_with('/etc/modules', 'r+') |
921 | + _file.read.assert_called() |
922 | + _file.write.assert_called_with('mymod') |
923 | + self.check_call.assert_called_with(['modprobe', 'mymod']) |
924 | + |
925 | + def test_filesystem_mounted(self): |
926 | + self._patch('mounts') |
927 | + self.mounts.return_value = [['/afs', '/dev/sdb'], ['/bfs', '/dev/sdd']] |
928 | + self.assertTrue(ceph_utils.filesystem_mounted('/afs')) |
929 | + self.assertFalse(ceph_utils.filesystem_mounted('/zfs')) |
930 | + |
931 | + @patch('os.path.exists') |
932 | + def test_make_filesystem(self, _exists): |
933 | + _exists.return_value = True |
934 | + ceph_utils.make_filesystem('/dev/sdd') |
935 | + self.log.assert_called() |
936 | + self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd']) |
937 | + |
938 | + @patch('os.path.exists') |
939 | + def test_make_filesystem_xfs(self, _exists): |
940 | + _exists.return_value = True |
941 | + ceph_utils.make_filesystem('/dev/sdd', 'xfs') |
942 | + self.log.assert_called() |
943 | + self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd']) |
944 | + |
945 | + @patch('os.chown') |
946 | + @patch('os.stat') |
947 | + def test_place_data_on_block_device(self, _stat, _chown): |
948 | + self._patch('mount') |
949 | + self._patch('copy_files') |
950 | + self._patch('umount') |
951 | + _stat.return_value.st_uid = 100 |
952 | + _stat.return_value.st_gid = 100 |
953 | + ceph_utils.place_data_on_block_device('/dev/sdd', '/var/lib/mysql') |
954 | + self.mount.assert_has_calls([ |
955 | + call('/dev/sdd', '/mnt'), |
956 | + call('/dev/sdd', '/var/lib/mysql', persist=True) |
957 | + ]) |
958 | + self.copy_files.assert_called_with('/var/lib/mysql', '/mnt') |
959 | + self.umount.assert_called_with('/mnt') |
960 | + _chown.assert_called_with('/var/lib/mysql', 100, 100) |
961 | + |
962 | + @patch('shutil.copytree') |
963 | + @patch('os.listdir') |
964 | + @patch('os.path.isdir') |
965 | + def test_copy_files_is_dir(self, _isdir, _listdir, _copytree): |
966 | + _isdir.return_value = True |
967 | + subdirs = ['a', 'b', 'c'] |
968 | + _listdir.return_value = subdirs |
969 | + ceph_utils.copy_files('/source', '/dest') |
970 | + for d in subdirs: |
971 | + _copytree.assert_has_calls([ |
972 | + call('/source/{}'.format(d), '/dest/{}'.format(d), |
973 | + False, None) |
974 | + ]) |
975 | + |
976 | + @patch('shutil.copytree') |
977 | + @patch('os.listdir') |
978 | + @patch('os.path.isdir') |
979 | + def test_copy_files_include_symlinks(self, _isdir, _listdir, _copytree): |
980 | + _isdir.return_value = True |
981 | + subdirs = ['a', 'b', 'c'] |
982 | + _listdir.return_value = subdirs |
983 | + ceph_utils.copy_files('/source', '/dest', True) |
984 | + for d in subdirs: |
985 | + _copytree.assert_has_calls([ |
986 | + call('/source/{}'.format(d), '/dest/{}'.format(d), |
987 | + True, None) |
988 | + ]) |
989 | + |
990 | + @patch('shutil.copytree') |
991 | + @patch('os.listdir') |
992 | + @patch('os.path.isdir') |
993 | + def test_copy_files_ignore(self, _isdir, _listdir, _copytree): |
994 | + _isdir.return_value = True |
995 | + subdirs = ['a', 'b', 'c'] |
996 | + _listdir.return_value = subdirs |
997 | + ceph_utils.copy_files('/source', '/dest', True, False) |
998 | + for d in subdirs: |
999 | + _copytree.assert_has_calls([ |
1000 | + call('/source/{}'.format(d), '/dest/{}'.format(d), |
1001 | + True, False) |
1002 | + ]) |
1003 | + |
1004 | + @patch('shutil.copy2') |
1005 | + @patch('os.listdir') |
1006 | + @patch('os.path.isdir') |
1007 | + def test_copy_files_files(self, _isdir, _listdir, _copy2): |
1008 | + _isdir.return_value = False |
1009 | + files = ['a', 'b', 'c'] |
1010 | + _listdir.return_value = files |
1011 | + ceph_utils.copy_files('/source', '/dest') |
1012 | + for f in files: |
1013 | + _copy2.assert_has_calls([ |
1014 | + call('/source/{}'.format(f), '/dest/{}'.format(f)) |
1015 | + ]) |
1016 | + |
1017 | + def test_ensure_ceph_storage(self): |
1018 | + self._patch('pool_exists') |
1019 | + self.pool_exists.return_value = False |
1020 | + self._patch('create_pool') |
1021 | + self._patch('rbd_exists') |
1022 | + self.rbd_exists.return_value = False |
1023 | + self._patch('create_rbd_image') |
1024 | + self._patch('image_mapped') |
1025 | + self.image_mapped.return_value = False |
1026 | + self._patch('map_block_storage') |
1027 | + self._patch('filesystem_mounted') |
1028 | + self.filesystem_mounted.return_value = False |
1029 | + self._patch('make_filesystem') |
1030 | + self._patch('service_stop') |
1031 | + self._patch('service_start') |
1032 | + self._patch('service_running') |
1033 | + self.service_running.return_value = True |
1034 | + self._patch('place_data_on_block_device') |
1035 | + _service = 'mysql' |
1036 | + _pool = 'bar' |
1037 | + _rbd_img = 'foo' |
1038 | + _mount = '/var/lib/mysql' |
1039 | + _services = ['mysql'] |
1040 | + _blk_dev = '/dev/rbd1' |
1041 | + ceph_utils.ensure_ceph_storage(_service, _pool, |
1042 | + _rbd_img, 1024, _mount, |
1043 | + _blk_dev, 'ext4', _services) |
1044 | + self.create_pool.assert_called_with(_service, _pool) |
1045 | + self.create_rbd_image.assert_called_with(_service, _pool, |
1046 | + _rbd_img, 1024) |
1047 | + self.map_block_storage.assert_called_with(_service, _pool, _rbd_img) |
1048 | + self.make_filesystem.assert_called_with(_blk_dev, 'ext4') |
1049 | + self.service_stop.assert_called_with(_services[0]) |
1050 | + self.place_data_on_block_device.assert_called_with(_blk_dev, _mount) |
1051 | + self.service_start.assert_called_with(_services[0]) |
1052 | |
1053 | def test_make_filesystem_default_filesystem(self): |
1054 | '''make_filesystem() uses ext4 as the default filesystem.''' |
1055 | @@ -126,6 +456,7 @@ |
1056 | fstype = 'xfs' |
1057 | ceph_utils.make_filesystem(device, fstype) |
1058 | self.check_call.assert_called_with(['mkfs', '-t', fstype, device]) |
1059 | - self.log.assert_called_with('ceph: Formatting block device %s as ' |
1060 | - 'filesystem %s.' % (device, fstype), level='INFO') |
1061 | - |
1062 | + self.log.assert_called_with( |
1063 | + 'ceph: Formatting block device %s as ' |
1064 | + 'filesystem %s.' % (device, fstype), level='INFO' |
1065 | + ) |
1066 | |
1067 | === added file 'tests/helpers.py' |
1068 | --- tests/helpers.py 1970-01-01 00:00:00 +0000 |
1069 | +++ tests/helpers.py 2013-09-23 10:51:06 +0000 |
1070 | @@ -0,0 +1,34 @@ |
1071 | +''' General helper functions for tests ''' |
1072 | +from contextlib import contextmanager |
1073 | +from mock import patch, MagicMock |
1074 | +import io |
1075 | + |
1076 | + |
1077 | +@contextmanager |
1078 | +def patch_open(): |
1079 | + '''Patch open() to allow mocking both open() itself and the file that is |
1080 | + yielded. |
1081 | + |
1082 | + Yields the mock for "open" and "file", respectively.''' |
1083 | + mock_open = MagicMock(spec=open) |
1084 | + mock_file = MagicMock(spec=file) |
1085 | + |
1086 | + @contextmanager |
1087 | + def stub_open(*args, **kwargs): |
1088 | + mock_open(*args, **kwargs) |
1089 | + yield mock_file |
1090 | + |
1091 | + with patch('__builtin__.open', stub_open): |
1092 | + yield mock_open, mock_file |
1093 | + |
1094 | + |
1095 | +@contextmanager |
1096 | +def mock_open(filename, contents=None): |
1097 | + ''' Slightly simpler mock of open to return contents for filename ''' |
1098 | + def mock_file(*args): |
1099 | + if args[0] == filename: |
1100 | + return io.StringIO(contents) |
1101 | + else: |
1102 | + return open(*args) |
1103 | + with patch('__builtin__.open', mock_file): |
1104 | + yield |
LGTM. At this point we should probably have all tests that use patch_open()/mock_open() using the same copy.
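The consolidation suggested here would look roughly like this (a sketch only; the test class name is hypothetical, and it assumes the tests/helpers.py added by this branch is the single shared copy). Each test module imports the shared helper instead of redefining its own:

    from mock import patch
    from testtools import TestCase

    from tests.helpers import patch_open
    import charmhelpers.contrib.storage.linux.ceph as ceph_utils


    class SharedPatchOpenExample(TestCase):

        @patch.object(ceph_utils, 'log')
        @patch('os.path.exists')
        def test_create_key_file_writes_key(self, _exists, _log):
            _exists.return_value = False
            # patch_open() yields mocks for open() and for the file object
            # it returns, so the test never touches the real filesystem.
            with patch_open() as (_open, _file):
                ceph_utils.create_key_file('cinder', 'cephkey')
                _open.assert_called_with('/etc/ceph/ceph.client.cinder.key',
                                         'w')
                _file.write.assert_called_with('cephkey')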