Merge lp:~chad.smith/charms/precise/mysql/mysql-ha-with-health into lp:charms/mysql
Proposed by: Chad Smith
Status: Superseded
Proposed branch: lp:~chad.smith/charms/precise/mysql/mysql-ha-with-health
Merge into: lp:charms/mysql
Diff against target: 1376 lines (+1038/-149), 20 files modified
  config.yaml (+36/-0)
  hooks/common.py (+2/-2)
  hooks/config-changed (+1/-1)
  hooks/ha_relations.py (+146/-0)
  hooks/install (+2/-1)
  hooks/lib/ceph_utils.py (+256/-0)
  hooks/lib/cluster_utils.py (+130/-0)
  hooks/lib/utils.py (+283/-0)
  hooks/master-relation-changed (+1/-1)
  hooks/monitors.common.bash (+1/-1)
  hooks/shared-db-relations (+0/-137)
  hooks/shared_db_relations.py (+139/-0)
  hooks/slave-relation-broken (+2/-2)
  hooks/slave-relation-changed (+2/-2)
  hooks/upgrade-charm (+17/-1)
  metadata.yaml (+8/-0)
  revision (+1/-1)
  scripts/add_to_cluster (+2/-0)
  scripts/health_checks.d/service_crm_access (+7/-0)
  scripts/remove_from_cluster (+2/-0)
To merge this branch: bzr merge lp:~chad.smith/charms/precise/mysql/mysql-ha-with-health
Related bugs: none listed
Reviewer: Jerry Seutter (Pending)
Review via email: mp+155507@code.launchpad.net
This proposal has been superseded by a proposal from 2013-03-26.
Description of the change
Add mysql health checks and add_to_cluster/remove_from_cluster scripts in support of the new HA (corosync/pacemaker + Ceph) configuration.
Comparable changes have gone into most of the other OpenStack service charms.
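
For reviewers unfamiliar with the HA workflow, here is a rough sketch of how the new config options and relations might be exercised. The service names, unit count, and VIP address are illustrative, and a ceph service is assumed to be deployed already; none of this is part of the branch itself:

  # Deploy two mysql units and configure the virtual IP added in config.yaml.
  juju deploy -n 2 mysql
  juju set mysql vip=192.168.21.100 vip_iface=eth0 vip_cidr=24
  # The hacluster subordinate drives corosync/pacemaker via the new 'ha'
  # relation; the 'ceph' relation provides the shared block storage that
  # ha_relations.py mounts at /var/lib/mysql.
  juju deploy hacluster mysql-hacluster
  juju add-relation mysql mysql-hacluster
  juju add-relation mysql ceph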
Preview Diff
1 | === modified file 'config.yaml' |
2 | --- config.yaml 2012-09-13 21:44:01 +0000 |
3 | +++ config.yaml 2013-03-26 14:36:21 +0000 |
4 | @@ -31,3 +31,39 @@ |
5 | default: 'MIXED' |
6 | type: string |
7 | description: If binlogging is enabled, this is the format that will be used. Ignored when tuning-level == fast. |
8 | + vip: |
9 | + type: string |
10 | + description: "Virtual IP to use to front mysql in ha configuration" |
11 | + vip_iface: |
12 | + type: string |
13 | + default: eth0 |
14 | + description: "Network Interface where to place the Virtual IP" |
15 | + vip_cidr: |
16 | + type: int |
17 | + default: 24 |
18 | + description: "Netmask that will be used for the Virtual IP" |
19 | + ha-bindiface: |
20 | + type: string |
21 | + default: eth0 |
22 | + description: | |
23 | + Default network interface on which HA cluster will bind to communication |
24 | + with the other members of the HA Cluster. |
25 | + ha-mcastport: |
26 | + type: int |
27 | + default: 5411 |
28 | + description: | |
29 | + Default multicast port number that will be used to communicate between |
30 | + HA Cluster nodes. |
31 | + block-size: |
32 | + type: int |
33 | + default: 5 |
34 | + description: | |
35 | + Default block storage size to create when setting up MySQL block storage. |
36 | + This value should be specified in GB (e.g. 100 not 100GB). |
37 | + rbd-name: |
38 | + type: string |
39 | + default: mysql1 |
40 | + description: | |
41 | + The name that will be used to create the Ceph's RBD image with. If the |
42 | + image name exists in Ceph, it will be re-used and the data will be |
43 | + overwritten. |
44 | |
45 | === added symlink 'hooks/ceph-relation-changed' |
46 | === target is u'ha_relations.py' |
47 | === added symlink 'hooks/ceph-relation-joined' |
48 | === target is u'ha_relations.py' |
49 | === added symlink 'hooks/cluster-relation-changed' |
50 | === target is u'ha_relations.py' |
51 | === modified file 'hooks/common.py' |
52 | --- hooks/common.py 2012-11-29 16:31:14 +0000 |
53 | +++ hooks/common.py 2013-03-26 14:36:21 +0000 |
54 | @@ -7,7 +7,7 @@ |
55 | import uuid |
56 | |
57 | def get_service_user_file(service): |
58 | - return '/var/lib/juju/%s.service_user2' % service |
59 | + return '/var/lib/mysql/%s.service_user2' % service |
60 | |
61 | |
62 | def get_service_user(service): |
63 | @@ -58,7 +58,7 @@ |
64 | |
65 | def get_db_cursor(): |
66 | # Connect to mysql |
67 | - passwd = open("/var/lib/juju/mysql.passwd").read().strip() |
68 | + passwd = open("/var/lib/mysql/mysql.passwd").read().strip() |
69 | connection = MySQLdb.connect(user="root", host="localhost", passwd=passwd) |
70 | return connection.cursor() |
71 | |
72 | |
73 | === modified file 'hooks/config-changed' |
74 | --- hooks/config-changed 2012-11-21 17:14:24 +0000 |
75 | +++ hooks/config-changed 2013-03-26 14:36:21 +0000 |
76 | @@ -89,7 +89,7 @@ |
77 | check_call(['add-apt-repository','-y','deb http://%s %s main' % (source, series)]) |
78 | check_call(['apt-get','update']) |
79 | |
80 | -with open('/var/lib/juju/mysql.passwd','r') as rpw: |
81 | +with open('/var/lib/mysql/mysql.passwd','r') as rpw: |
82 | root_pass = rpw.read() |
83 | |
84 | dconf = Popen(['debconf-set-selections'], stdin=PIPE) |
85 | |
86 | === added symlink 'hooks/ha-relation-changed' |
87 | === target is u'ha_relations.py' |
88 | === added symlink 'hooks/ha-relation-joined' |
89 | === target is u'ha_relations.py' |
90 | === added file 'hooks/ha_relations.py' |
91 | --- hooks/ha_relations.py 1970-01-01 00:00:00 +0000 |
92 | +++ hooks/ha_relations.py 2013-03-26 14:36:21 +0000 |
93 | @@ -0,0 +1,146 @@ |
94 | +#!/usr/bin/env python |
95 | + |
96 | +import sys |
97 | +import os |
98 | + |
99 | +import lib.utils as utils |
100 | +import lib.ceph_utils as ceph |
101 | +import lib.cluster_utils as cluster |
102 | + |
103 | +# CEPH |
104 | +DATA_SRC_DST = '/var/lib/mysql' |
105 | +SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0] |
106 | +POOL_NAME = SERVICE_NAME |
107 | +LEADER_RES = 'res_mysql_vip' |
108 | + |
109 | + |
110 | +def ha_relation_joined(): |
111 | + vip = utils.config_get('vip') |
112 | + vip_iface = utils.config_get('vip_iface') |
113 | + vip_cidr = utils.config_get('vip_cidr') |
114 | + corosync_bindiface = utils.config_get('ha-bindiface') |
115 | + corosync_mcastport = utils.config_get('ha-mcastport') |
116 | + |
117 | + if None in [vip, vip_cidr, vip_iface]: |
118 | + utils.juju_log('WARNING', |
119 | + 'Insufficient VIP information to configure cluster') |
120 | + sys.exit(1) |
121 | + |
122 | + # Starting configuring resources. |
123 | + init_services = { |
124 | + 'res_mysqld': 'mysql', |
125 | + } |
126 | + |
127 | + # If the 'ha' relation has been made *before* the 'ceph' relation, |
128 | + # it doesn't make sense to make it until after the 'ceph' relation is made |
129 | + if not utils.is_relation_made('ceph', 'auth'): |
130 | + utils.juju_log('INFO', |
131 | + '*ceph* relation does not exist. ' |
132 | + 'Not sending *ha* relation data yet') |
133 | + return |
134 | + else: |
135 | + utils.juju_log('INFO', |
136 | + '*ceph* relation exists. Sending *ha* relation data') |
137 | + |
138 | + block_storage = 'ceph' |
139 | + |
140 | + resources = { |
141 | + 'res_mysql_rbd': 'ocf:ceph:rbd', |
142 | + 'res_mysql_fs': 'ocf:heartbeat:Filesystem', |
143 | + 'res_mysql_vip': 'ocf:heartbeat:IPaddr2', |
144 | + 'res_mysqld': 'upstart:mysql', |
145 | + } |
146 | + |
147 | + rbd_name = utils.config_get('rbd-name') |
148 | + resource_params = { |
149 | + 'res_mysql_rbd': 'params name="%s" pool="%s" user="%s" ' |
150 | + 'secret="%s"' % \ |
151 | + (rbd_name, POOL_NAME, |
152 | + SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)), |
153 | + 'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" ' |
154 | + 'fstype="ext4" op start start-delay="10s"' % \ |
155 | + (POOL_NAME, rbd_name, DATA_SRC_DST), |
156 | + 'res_mysql_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ |
157 | + (vip, vip_cidr, vip_iface), |
158 | + 'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"', |
159 | + } |
160 | + |
161 | + groups = { |
162 | + 'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld', |
163 | + } |
164 | + |
165 | + for rel_id in utils.relation_ids('ha'): |
166 | + utils.relation_set(rid=rel_id, |
167 | + block_storage=block_storage, |
168 | + corosync_bindiface=corosync_bindiface, |
169 | + corosync_mcastport=corosync_mcastport, |
170 | + resources=resources, |
171 | + resource_params=resource_params, |
172 | + init_services=init_services, |
173 | + groups=groups) |
174 | + |
175 | + |
176 | +def ha_relation_changed(): |
177 | + clustered = utils.relation_get('clustered') |
178 | + if (clustered and cluster.is_leader(LEADER_RES)): |
179 | + utils.juju_log('INFO', 'Cluster configured, notifying other services') |
180 | + # Tell all related services to start using the VIP |
181 | + for r_id in utils.relation_ids('shared-db'): |
182 | + utils.relation_set(rid=r_id, |
183 | + db_host=utils.config_get('vip')) |
184 | + |
185 | + |
186 | +def ceph_joined(): |
187 | + utils.juju_log('INFO', 'Start Ceph Relation Joined') |
188 | + ceph.install() |
189 | + utils.juju_log('INFO', 'Finish Ceph Relation Joined') |
190 | + |
191 | + |
192 | +def ceph_changed(): |
193 | + utils.juju_log('INFO', 'Start Ceph Relation Changed') |
194 | + auth = utils.relation_get('auth') |
195 | + key = utils.relation_get('key') |
196 | + if None in [auth, key]: |
197 | + utils.juju_log('INFO', 'Missing key or auth in relation') |
198 | + return |
199 | + |
200 | + ceph.configure(service=SERVICE_NAME, key=key, auth=auth) |
201 | + |
202 | + if cluster.eligible_leader(LEADER_RES): |
203 | + sizemb = int(utils.config_get('block-size')) * 1024 |
204 | + rbd_img = utils.config_get('rbd-name') |
205 | + blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img) |
206 | + ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME, |
207 | + rbd_img=rbd_img, sizemb=sizemb, |
208 | + fstype='ext4', mount_point=DATA_SRC_DST, |
209 | + blk_device=blk_device, |
210 | + system_services=['mysql']) |
211 | + else: |
212 | + utils.juju_log('INFO', |
213 | + 'This is not the peer leader. Not configuring RBD.') |
214 | + # Stopping MySQL |
215 | + if utils.running('mysql'): |
216 | + utils.juju_log('INFO', 'Stopping MySQL...') |
217 | + utils.stop('mysql') |
218 | + |
219 | + # If 'ha' relation has been made before the 'ceph' relation |
220 | + # it is important to make sure the ha-relation data is being |
221 | + # sent. |
222 | + if utils.is_relation_made('ha'): |
223 | + utils.juju_log('INFO', |
224 | + '*ha* relation exists. Making sure the ha' |
225 | + ' relation data is sent.') |
226 | + ha_relation_joined() |
227 | + return |
228 | + |
229 | + utils.juju_log('INFO', 'Finish Ceph Relation Changed') |
230 | + |
231 | + |
232 | +hooks = { |
233 | + "ha-relation-joined": ha_relation_joined, |
234 | + "ha-relation-changed": ha_relation_changed, |
235 | + "ceph-relation-joined": ceph_joined, |
236 | + "ceph-relation-changed": ceph_changed, |
237 | +} |
238 | + |
239 | +utils.do_hooks(hooks) |
240 | |
241 | === modified file 'hooks/install' |
242 | --- hooks/install 2012-11-01 21:49:21 +0000 |
243 | +++ hooks/install 2013-03-26 14:36:21 +0000 |
244 | @@ -3,8 +3,9 @@ |
245 | apt-get update |
246 | apt-get install -y debconf-utils python-mysqldb uuid pwgen dnsutils charm-helper-sh || exit 1 |
247 | |
248 | -PASSFILE=/var/lib/juju/mysql.passwd |
249 | +PASSFILE=/var/lib/mysql/mysql.passwd |
250 | if ! [ -f $PASSFILE ] ; then |
251 | + mkdir -p /var/lib/mysql |
252 | touch $PASSFILE |
253 | fi |
254 | chmod 0600 $PASSFILE |
255 | |
256 | === added directory 'hooks/lib' |
257 | === added file 'hooks/lib/__init__.py' |
258 | === added file 'hooks/lib/ceph_utils.py' |
259 | --- hooks/lib/ceph_utils.py 1970-01-01 00:00:00 +0000 |
260 | +++ hooks/lib/ceph_utils.py 2013-03-26 14:36:21 +0000 |
261 | @@ -0,0 +1,256 @@ |
262 | +# |
263 | +# Copyright 2012 Canonical Ltd. |
264 | +# |
265 | +# This file is sourced from lp:openstack-charm-helpers |
266 | +# |
267 | +# Authors: |
268 | +# James Page <james.page@ubuntu.com> |
269 | +# Adam Gandelman <adamg@ubuntu.com> |
270 | +# |
271 | + |
272 | +import commands |
273 | +import subprocess |
274 | +import os |
275 | +import shutil |
276 | +import lib.utils as utils |
277 | + |
278 | +KEYRING = '/etc/ceph/ceph.client.%s.keyring' |
279 | +KEYFILE = '/etc/ceph/ceph.client.%s.key' |
280 | + |
281 | +CEPH_CONF = """[global] |
282 | + auth supported = %(auth)s |
283 | + keyring = %(keyring)s |
284 | + mon host = %(mon_hosts)s |
285 | +""" |
286 | + |
287 | + |
288 | +def execute(cmd): |
289 | + subprocess.check_call(cmd) |
290 | + |
291 | + |
292 | +def execute_shell(cmd): |
293 | + subprocess.check_call(cmd, shell=True) |
294 | + |
295 | + |
296 | +def install(): |
297 | + ceph_dir = "/etc/ceph" |
298 | + if not os.path.isdir(ceph_dir): |
299 | + os.mkdir(ceph_dir) |
300 | + utils.install('ceph-common') |
301 | + |
302 | + |
303 | +def rbd_exists(service, pool, rbd_img): |
304 | + (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\ |
305 | + (service, pool)) |
306 | + return rbd_img in out |
307 | + |
308 | + |
309 | +def create_rbd_image(service, pool, image, sizemb): |
310 | + cmd = [ |
311 | + 'rbd', |
312 | + 'create', |
313 | + image, |
314 | + '--size', |
315 | + str(sizemb), |
316 | + '--id', |
317 | + service, |
318 | + '--pool', |
319 | + pool |
320 | + ] |
321 | + execute(cmd) |
322 | + |
323 | + |
324 | +def pool_exists(service, name): |
325 | + (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) |
326 | + return name in out |
327 | + |
328 | + |
329 | +def create_pool(service, name): |
330 | + cmd = [ |
331 | + 'rados', |
332 | + '--id', |
333 | + service, |
334 | + 'mkpool', |
335 | + name |
336 | + ] |
337 | + execute(cmd) |
338 | + |
339 | + |
340 | +def keyfile_path(service): |
341 | + return KEYFILE % service |
342 | + |
343 | + |
344 | +def keyring_path(service): |
345 | + return KEYRING % service |
346 | + |
347 | + |
348 | +def create_keyring(service, key): |
349 | + keyring = keyring_path(service) |
350 | + if os.path.exists(keyring): |
351 | + utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring) |
352 | + cmd = [ |
353 | + 'ceph-authtool', |
354 | + keyring, |
355 | + '--create-keyring', |
356 | + '--name=client.%s' % service, |
357 | + '--add-key=%s' % key |
358 | + ] |
359 | + execute(cmd) |
360 | + utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring) |
361 | + |
362 | + |
363 | +def create_key_file(service, key): |
364 | + # create a file containing the key |
365 | + keyfile = keyfile_path(service) |
366 | + if os.path.exists(keyfile): |
367 | + utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile) |
368 | + fd = open(keyfile, 'w') |
369 | + fd.write(key) |
370 | + fd.close() |
371 | + utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile) |
372 | + |
373 | + |
374 | +def get_ceph_nodes(): |
375 | + hosts = [] |
376 | + for r_id in utils.relation_ids('ceph'): |
377 | + for unit in utils.relation_list(r_id): |
378 | + hosts.append(utils.relation_get('private-address', |
379 | + unit=unit, rid=r_id)) |
380 | + return hosts |
381 | + |
382 | + |
383 | +def configure(service, key, auth): |
384 | + create_keyring(service, key) |
385 | + create_key_file(service, key) |
386 | + hosts = get_ceph_nodes() |
387 | + mon_hosts = ",".join(map(str, hosts)) |
388 | + keyring = keyring_path(service) |
389 | + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: |
390 | + ceph_conf.write(CEPH_CONF % locals()) |
391 | + modprobe_kernel_module('rbd') |
392 | + |
393 | + |
394 | +def image_mapped(image_name): |
395 | + (rc, out) = commands.getstatusoutput('rbd showmapped') |
396 | + return image_name in out |
397 | + |
398 | + |
399 | +def map_block_storage(service, pool, image): |
400 | + cmd = [ |
401 | + 'rbd', |
402 | + 'map', |
403 | + '%s/%s' % (pool, image), |
404 | + '--user', |
405 | + service, |
406 | + '--secret', |
407 | + keyfile_path(service), |
408 | + ] |
409 | + execute(cmd) |
410 | + |
411 | + |
412 | +def filesystem_mounted(fs): |
413 | + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 |
414 | + |
415 | + |
416 | +def make_filesystem(blk_device, fstype='ext4'): |
417 | + utils.juju_log('INFO', |
418 | + 'ceph: Formatting block device %s as filesystem %s.' %\ |
419 | + (blk_device, fstype)) |
420 | + cmd = ['mkfs', '-t', fstype, blk_device] |
421 | + execute(cmd) |
422 | + |
423 | + |
424 | +def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): |
425 | + # mount block device into /mnt |
426 | + cmd = ['mount', '-t', fstype, blk_device, '/mnt'] |
427 | + execute(cmd) |
428 | + |
429 | + # copy data to /mnt |
430 | + try: |
431 | + copy_files(data_src_dst, '/mnt') |
432 | + except: |
433 | + pass |
434 | + |
435 | + # umount block device |
436 | + cmd = ['umount', '/mnt'] |
437 | + execute(cmd) |
438 | + |
439 | + _dir = os.stat(data_src_dst) |
440 | + uid = _dir.st_uid |
441 | + gid = _dir.st_gid |
442 | + |
443 | + # re-mount where the data should originally be |
444 | + cmd = ['mount', '-t', fstype, blk_device, data_src_dst] |
445 | + execute(cmd) |
446 | + |
447 | + # ensure original ownership of new mount. |
448 | + cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] |
449 | + execute(cmd) |
450 | + |
451 | + |
452 | +# TODO: re-use |
453 | +def modprobe_kernel_module(module): |
454 | + utils.juju_log('INFO', 'Loading kernel module') |
455 | + cmd = ['modprobe', module] |
456 | + execute(cmd) |
457 | + cmd = 'echo %s >> /etc/modules' % module |
458 | + execute_shell(cmd) |
459 | + |
460 | + |
461 | +def copy_files(src, dst, symlinks=False, ignore=None): |
462 | + for item in os.listdir(src): |
463 | + s = os.path.join(src, item) |
464 | + d = os.path.join(dst, item) |
465 | + if os.path.isdir(s): |
466 | + shutil.copytree(s, d, symlinks, ignore) |
467 | + else: |
468 | + shutil.copy2(s, d) |
469 | + |
470 | + |
471 | +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, |
472 | + blk_device, fstype, system_services=[]): |
473 | + """ |
474 | + To be called from the current cluster leader. |
475 | + Ensures given pool and RBD image exists, is mapped to a block device, |
476 | + and the device is formatted and mounted at the given mount_point. |
477 | + |
478 | + If formatting a device for the first time, data existing at mount_point |
479 | + will be migrated to the RBD device before being remounted. |
480 | + |
481 | + All services listed in system_services will be stopped prior to data |
482 | + migration and restarted when complete. |
483 | + """ |
484 | + # Ensure pool, RBD image, RBD mappings are in place. |
485 | + if not pool_exists(service, pool): |
486 | + utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool) |
487 | + create_pool(service, pool) |
488 | + |
489 | + if not rbd_exists(service, pool, rbd_img): |
490 | + utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img) |
491 | + create_rbd_image(service, pool, rbd_img, sizemb) |
492 | + |
493 | + if not image_mapped(rbd_img): |
494 | + utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.') |
495 | + map_block_storage(service, pool, rbd_img) |
496 | + |
497 | + # make file system |
498 | + # TODO: What happens if for whatever reason this is run again and |
499 | + # the data is already in the rbd device and/or is mounted?? |
500 | + # When it is mounted already, it will fail to make the fs |
501 | + # XXX: This is really sketchy! Need to at least add an fstab entry |
502 | + # otherwise this hook will blow away existing data if its executed |
503 | + # after a reboot. |
504 | + if not filesystem_mounted(mount_point): |
505 | + make_filesystem(blk_device, fstype) |
506 | + |
507 | + for svc in system_services: |
508 | + if utils.running(svc): |
509 | + utils.juju_log('INFO', |
510 | + 'Stopping services %s prior to migrating '\ |
511 | + 'data' % svc) |
512 | + utils.stop(svc) |
513 | + |
514 | + place_data_on_ceph(service, blk_device, mount_point, fstype) |
515 | + |
516 | + for svc in system_services: |
517 | + utils.start(svc) |
518 | |
519 | === added file 'hooks/lib/cluster_utils.py' |
520 | --- hooks/lib/cluster_utils.py 1970-01-01 00:00:00 +0000 |
521 | +++ hooks/lib/cluster_utils.py 2013-03-26 14:36:21 +0000 |
522 | @@ -0,0 +1,130 @@ |
523 | +# |
524 | +# Copyright 2012 Canonical Ltd. |
525 | +# |
526 | +# This file is sourced from lp:openstack-charm-helpers |
527 | +# |
528 | +# Authors: |
529 | +# James Page <james.page@ubuntu.com> |
530 | +# Adam Gandelman <adamg@ubuntu.com> |
531 | +# |
532 | + |
533 | +from lib.utils import ( |
534 | + juju_log, |
535 | + relation_ids, |
536 | + relation_list, |
537 | + relation_get, |
538 | + get_unit_hostname, |
539 | + config_get |
540 | + ) |
541 | +import subprocess |
542 | +import os |
543 | + |
544 | + |
545 | +def is_clustered(): |
546 | + for r_id in (relation_ids('ha') or []): |
547 | + for unit in (relation_list(r_id) or []): |
548 | + clustered = relation_get('clustered', |
549 | + rid=r_id, |
550 | + unit=unit) |
551 | + if clustered: |
552 | + return True |
553 | + return False |
554 | + |
555 | + |
556 | +def is_leader(resource): |
557 | + cmd = [ |
558 | + "crm", "resource", |
559 | + "show", resource |
560 | + ] |
561 | + try: |
562 | + status = subprocess.check_output(cmd) |
563 | + except subprocess.CalledProcessError: |
564 | + return False |
565 | + else: |
566 | + if get_unit_hostname() in status: |
567 | + return True |
568 | + else: |
569 | + return False |
570 | + |
571 | + |
572 | +def peer_units(): |
573 | + peers = [] |
574 | + for r_id in (relation_ids('cluster') or []): |
575 | + for unit in (relation_list(r_id) or []): |
576 | + peers.append(unit) |
577 | + return peers |
578 | + |
579 | + |
580 | +def oldest_peer(peers): |
581 | + local_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1] |
582 | + for peer in peers: |
583 | + remote_unit_no = peer.split('/')[1] |
584 | + if remote_unit_no < local_unit_no: |
585 | + return False |
586 | + return True |
587 | + |
588 | + |
589 | +def eligible_leader(resource): |
590 | + if is_clustered(): |
591 | + if not is_leader(resource): |
592 | + juju_log('INFO', 'Deferring action to CRM leader.') |
593 | + return False |
594 | + else: |
595 | + peers = peer_units() |
596 | + if peers and not oldest_peer(peers): |
597 | + juju_log('INFO', 'Deferring action to oldest service unit.') |
598 | + return False |
599 | + return True |
600 | + |
601 | + |
602 | +def https(): |
603 | + ''' |
604 | + Determines whether enough data has been provided in configuration |
605 | + or relation data to configure HTTPS |
606 | + . |
607 | + returns: boolean |
608 | + ''' |
609 | + if config_get('use-https') == "yes": |
610 | + return True |
611 | + if config_get('ssl_cert') and config_get('ssl_key'): |
612 | + return True |
613 | + for r_id in relation_ids('identity-service'): |
614 | + for unit in relation_list(r_id): |
615 | + if (relation_get('https_keystone', rid=r_id, unit=unit) and |
616 | + relation_get('ssl_cert', rid=r_id, unit=unit) and |
617 | + relation_get('ssl_key', rid=r_id, unit=unit) and |
618 | + relation_get('ca_cert', rid=r_id, unit=unit)): |
619 | + return True |
620 | + return False |
621 | + |
622 | + |
623 | +def determine_api_port(public_port): |
624 | + ''' |
625 | + Determine correct API server listening port based on |
626 | + existence of HTTPS reverse proxy and/or haproxy. |
627 | + |
628 | + public_port: int: standard public port for given service |
629 | + |
630 | + returns: int: the correct listening port for the API service |
631 | + ''' |
632 | + i = 0 |
633 | + if len(peer_units()) > 0 or is_clustered(): |
634 | + i += 1 |
635 | + if https(): |
636 | + i += 1 |
637 | + return public_port - (i * 10) |
638 | + |
639 | + |
640 | +def determine_haproxy_port(public_port): |
641 | + ''' |
642 | + Description: Determine correct proxy listening port based on public IP + |
643 | + existence of HTTPS reverse proxy. |
644 | + |
645 | + public_port: int: standard public port for given service |
646 | + |
647 | + returns: int: the correct listening port for the HAProxy service |
648 | + ''' |
649 | + i = 0 |
650 | + if https(): |
651 | + i += 1 |
652 | + return public_port - (i * 10) |
653 | |
654 | === added file 'hooks/lib/utils.py' |
655 | --- hooks/lib/utils.py 1970-01-01 00:00:00 +0000 |
656 | +++ hooks/lib/utils.py 2013-03-26 14:36:21 +0000 |
657 | @@ -0,0 +1,283 @@ |
658 | +# |
659 | +# Copyright 2012 Canonical Ltd. |
660 | +# |
661 | +# This file is sourced from lp:openstack-charm-helpers |
662 | +# |
663 | +# Authors: |
664 | +# James Page <james.page@ubuntu.com> |
665 | +# Paul Collins <paul.collins@canonical.com> |
666 | +# Adam Gandelman <adamg@ubuntu.com> |
667 | +# |
668 | + |
669 | +import json |
670 | +import os |
671 | +import subprocess |
672 | +import socket |
673 | +import sys |
674 | + |
675 | + |
676 | +def do_hooks(hooks): |
677 | + hook = os.path.basename(sys.argv[0]) |
678 | + |
679 | + try: |
680 | + hook_func = hooks[hook] |
681 | + except KeyError: |
682 | + juju_log('INFO', |
683 | + "This charm doesn't know how to handle '{}'.".format(hook)) |
684 | + else: |
685 | + hook_func() |
686 | + |
687 | + |
688 | +def install(*pkgs): |
689 | + cmd = [ |
690 | + 'apt-get', |
691 | + '-y', |
692 | + 'install' |
693 | + ] |
694 | + for pkg in pkgs: |
695 | + cmd.append(pkg) |
696 | + subprocess.check_call(cmd) |
697 | + |
698 | +TEMPLATES_DIR = 'templates' |
699 | + |
700 | +try: |
701 | + import jinja2 |
702 | +except ImportError: |
703 | + install('python-jinja2') |
704 | + import jinja2 |
705 | + |
706 | +try: |
707 | + import dns.resolver |
708 | +except ImportError: |
709 | + install('python-dnspython') |
710 | + import dns.resolver |
711 | + |
712 | + |
713 | +def render_template(template_name, context, template_dir=TEMPLATES_DIR): |
714 | + templates = jinja2.Environment( |
715 | + loader=jinja2.FileSystemLoader(template_dir) |
716 | + ) |
717 | + template = templates.get_template(template_name) |
718 | + return template.render(context) |
719 | + |
720 | +CLOUD_ARCHIVE = \ |
721 | +""" # Ubuntu Cloud Archive |
722 | +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
723 | +""" |
724 | + |
725 | +CLOUD_ARCHIVE_POCKETS = { |
726 | + 'folsom': 'precise-updates/folsom', |
727 | + 'folsom/updates': 'precise-updates/folsom', |
728 | + 'folsom/proposed': 'precise-proposed/folsom', |
729 | + 'grizzly': 'precise-updates/grizzly', |
730 | + 'grizzly/updates': 'precise-updates/grizzly', |
731 | + 'grizzly/proposed': 'precise-proposed/grizzly' |
732 | + } |
733 | + |
734 | + |
735 | +def configure_source(): |
736 | + source = str(config_get('openstack-origin')) |
737 | + if not source: |
738 | + return |
739 | + if source.startswith('ppa:'): |
740 | + cmd = [ |
741 | + 'add-apt-repository', |
742 | + source |
743 | + ] |
744 | + subprocess.check_call(cmd) |
745 | + if source.startswith('cloud:'): |
746 | + install('ubuntu-cloud-keyring') |
747 | + pocket = source.split(':')[1] |
748 | + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
749 | + apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) |
750 | + if source.startswith('deb'): |
751 | + l = len(source.split('|')) |
752 | + if l == 2: |
753 | + (apt_line, key) = source.split('|') |
754 | + cmd = [ |
755 | + 'apt-key', |
756 | + 'adv', '--keyserver keyserver.ubuntu.com', |
757 | + '--recv-keys', key |
758 | + ] |
759 | + subprocess.check_call(cmd) |
760 | + elif l == 1: |
761 | + apt_line = source |
762 | + |
763 | + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: |
764 | + apt.write(apt_line + "\n") |
765 | + cmd = [ |
766 | + 'apt-get', |
767 | + 'update' |
768 | + ] |
769 | + subprocess.check_call(cmd) |
770 | + |
771 | +# Protocols |
772 | +TCP = 'TCP' |
773 | +UDP = 'UDP' |
774 | + |
775 | + |
776 | +def expose(port, protocol='TCP'): |
777 | + cmd = [ |
778 | + 'open-port', |
779 | + '{}/{}'.format(port, protocol) |
780 | + ] |
781 | + subprocess.check_call(cmd) |
782 | + |
783 | + |
784 | +def juju_log(severity, message): |
785 | + cmd = [ |
786 | + 'juju-log', |
787 | + '--log-level', severity, |
788 | + message |
789 | + ] |
790 | + subprocess.check_call(cmd) |
791 | + |
792 | + |
793 | +def relation_ids(relation): |
794 | + cmd = [ |
795 | + 'relation-ids', |
796 | + relation |
797 | + ] |
798 | + result = str(subprocess.check_output(cmd)).split() |
799 | + if result == "": |
800 | + return None |
801 | + else: |
802 | + return result |
803 | + |
804 | + |
805 | +def relation_list(rid): |
806 | + cmd = [ |
807 | + 'relation-list', |
808 | + '-r', rid, |
809 | + ] |
810 | + result = str(subprocess.check_output(cmd)).split() |
811 | + if result == "": |
812 | + return None |
813 | + else: |
814 | + return result |
815 | + |
816 | + |
817 | +def relation_get(attribute, unit=None, rid=None): |
818 | + cmd = [ |
819 | + 'relation-get', |
820 | + ] |
821 | + if rid: |
822 | + cmd.append('-r') |
823 | + cmd.append(rid) |
824 | + cmd.append(attribute) |
825 | + if unit: |
826 | + cmd.append(unit) |
827 | + value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
828 | + if value == "": |
829 | + return None |
830 | + else: |
831 | + return value |
832 | + |
833 | + |
834 | +def relation_set(**kwargs): |
835 | + cmd = [ |
836 | + 'relation-set' |
837 | + ] |
838 | + args = [] |
839 | + for k, v in kwargs.items(): |
840 | + if k == 'rid': |
841 | + if v: |
842 | + cmd.append('-r') |
843 | + cmd.append(v) |
844 | + else: |
845 | + args.append('{}={}'.format(k, v)) |
846 | + cmd += args |
847 | + subprocess.check_call(cmd) |
848 | + |
849 | + |
850 | +def unit_get(attribute): |
851 | + cmd = [ |
852 | + 'unit-get', |
853 | + attribute |
854 | + ] |
855 | + value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
856 | + if value == "": |
857 | + return None |
858 | + else: |
859 | + return value |
860 | + |
861 | + |
862 | +def config_get(attribute): |
863 | + cmd = [ |
864 | + 'config-get', |
865 | + '--format', |
866 | + 'json', |
867 | + ] |
868 | + out = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
869 | + cfg = json.loads(out) |
870 | + |
871 | + try: |
872 | + return cfg[attribute] |
873 | + except KeyError: |
874 | + return None |
875 | + |
876 | + |
877 | +def get_unit_hostname(): |
878 | + return socket.gethostname() |
879 | + |
880 | + |
881 | +def get_host_ip(hostname=unit_get('private-address')): |
882 | + try: |
883 | + # Test to see if already an IPv4 address |
884 | + socket.inet_aton(hostname) |
885 | + return hostname |
886 | + except socket.error: |
887 | + answers = dns.resolver.query(hostname, 'A') |
888 | + if answers: |
889 | + return answers[0].address |
890 | + return None |
891 | + |
892 | + |
893 | +def _svc_control(service, action): |
894 | + subprocess.check_call(['service', service, action]) |
895 | + |
896 | + |
897 | +def restart(*services): |
898 | + for service in services: |
899 | + _svc_control(service, 'restart') |
900 | + |
901 | + |
902 | +def stop(*services): |
903 | + for service in services: |
904 | + _svc_control(service, 'stop') |
905 | + |
906 | + |
907 | +def start(*services): |
908 | + for service in services: |
909 | + _svc_control(service, 'start') |
910 | + |
911 | + |
912 | +def reload(*services): |
913 | + for service in services: |
914 | + try: |
915 | + _svc_control(service, 'reload') |
916 | + except subprocess.CalledProcessError: |
917 | + # Reload failed - either service does not support reload |
918 | + # or it was not running - restart will fixup most things |
919 | + _svc_control(service, 'restart') |
920 | + |
921 | + |
922 | +def running(service): |
923 | + try: |
924 | + output = subprocess.check_output(['service', service, 'status']) |
925 | + except subprocess.CalledProcessError: |
926 | + return False |
927 | + else: |
928 | + if ("start/running" in output or |
929 | + "is running" in output): |
930 | + return True |
931 | + else: |
932 | + return False |
933 | + |
934 | + |
935 | +def is_relation_made(relation, key='private-address'): |
936 | + for r_id in (relation_ids(relation) or []): |
937 | + for unit in (relation_list(r_id) or []): |
938 | + if relation_get(key, rid=r_id, unit=unit): |
939 | + return True |
940 | + return False |
941 | |
942 | === modified file 'hooks/master-relation-changed' |
943 | --- hooks/master-relation-changed 2012-11-02 06:41:12 +0000 |
944 | +++ hooks/master-relation-changed 2013-03-26 14:36:21 +0000 |
945 | @@ -6,7 +6,7 @@ |
946 | |
947 | . /usr/share/charm-helper/sh/net.sh |
948 | |
949 | -ROOTARGS="-uroot -p`cat /var/lib/juju/mysql.passwd`" |
950 | +ROOTARGS="-uroot -p`cat /var/lib/mysql/mysql.passwd`" |
951 | snapdir=/var/www/snaps |
952 | mkdir -p $snapdir |
953 | apt-get -y install apache2 |
954 | |
955 | === modified file 'hooks/monitors.common.bash' |
956 | --- hooks/monitors.common.bash 2012-07-12 21:58:36 +0000 |
957 | +++ hooks/monitors.common.bash 2013-03-26 14:36:21 +0000 |
958 | @@ -1,4 +1,4 @@ |
959 | -MYSQL="mysql -uroot -p`cat /var/lib/juju/mysql.passwd`" |
960 | +MYSQL="mysql -uroot -p`cat /var/lib/mysql/mysql.passwd`" |
961 | monitor_user=monitors |
962 | . /usr/share/charm-helper/sh/net.sh |
963 | if [ -n "$JUJU_REMOTE_UNIT" ] ; then |
964 | |
965 | === modified symlink 'hooks/shared-db-relation-changed' |
966 | === target changed u'shared-db-relations' => u'shared_db_relations.py' |
967 | === modified symlink 'hooks/shared-db-relation-joined' |
968 | === target changed u'shared-db-relations' => u'shared_db_relations.py' |
969 | === removed file 'hooks/shared-db-relations' |
970 | --- hooks/shared-db-relations 2012-12-03 20:21:07 +0000 |
971 | +++ hooks/shared-db-relations 1970-01-01 00:00:00 +0000 |
972 | @@ -1,137 +0,0 @@ |
973 | -#!/usr/bin/python |
974 | -# |
975 | -# Create relations between a shared database to many peers. |
976 | -# Join does nothing. Peer requests access to $DATABASE from $REMOTE_HOST. |
977 | -# It's up to the hooks to ensure database exists, peer has access and |
978 | -# clean up grants after a broken/departed peer (TODO) |
979 | -# |
980 | -# Author: Adam Gandelman <adam.gandelman@canonical.com> |
981 | - |
982 | - |
983 | -from common import * |
984 | -import sys |
985 | -import subprocess |
986 | -import json |
987 | -import socket |
988 | -import os |
989 | - |
990 | - |
991 | -def pwgen(): |
992 | - return subprocess.check_output(['pwgen', '-s', '16']).strip() |
993 | - |
994 | - |
995 | -def relation_get(): |
996 | - return json.loads(subprocess.check_output( |
997 | - ['relation-get', |
998 | - '--format', |
999 | - 'json'] |
1000 | - ) |
1001 | - ) |
1002 | - |
1003 | - |
1004 | -def relation_set(**kwargs): |
1005 | - cmd = [ 'relation-set' ] |
1006 | - args = [] |
1007 | - for k, v in kwargs.items(): |
1008 | - if k == 'rid': |
1009 | - cmd.append('-r') |
1010 | - cmd.append(v) |
1011 | - else: |
1012 | - args.append('{}={}'.format(k, v)) |
1013 | - cmd += args |
1014 | - subprocess.check_call(cmd) |
1015 | - |
1016 | - |
1017 | -def shared_db_changed(): |
1018 | - |
1019 | - def configure_db(hostname, |
1020 | - database, |
1021 | - username): |
1022 | - passwd_file = "/var/lib/juju/mysql-{}.passwd"\ |
1023 | - .format(username) |
1024 | - if hostname != local_hostname: |
1025 | - remote_ip = socket.gethostbyname(hostname) |
1026 | - else: |
1027 | - remote_ip = '127.0.0.1' |
1028 | - |
1029 | - if not os.path.exists(passwd_file): |
1030 | - password = pwgen() |
1031 | - with open(passwd_file, 'w') as pfile: |
1032 | - pfile.write(password) |
1033 | - else: |
1034 | - with open(passwd_file) as pfile: |
1035 | - password = pfile.read().strip() |
1036 | - |
1037 | - if not database_exists(database): |
1038 | - create_database(database) |
1039 | - if not grant_exists(database, |
1040 | - username, |
1041 | - remote_ip): |
1042 | - create_grant(database, |
1043 | - username, |
1044 | - remote_ip, password) |
1045 | - return password |
1046 | - |
1047 | - settings = relation_get() |
1048 | - local_hostname = socket.getfqdn() |
1049 | - singleset = set([ |
1050 | - 'database', |
1051 | - 'username', |
1052 | - 'hostname' |
1053 | - ]) |
1054 | - |
1055 | - if singleset.issubset(settings): |
1056 | - # Process a single database configuration |
1057 | - password = configure_db(settings['hostname'], |
1058 | - settings['database'], |
1059 | - settings['username']) |
1060 | - relation_set(db_host=local_hostname, |
1061 | - password=password) |
1062 | - else: |
1063 | - # Process multiple database setup requests. |
1064 | - # from incoming relation data: |
1065 | - # nova_database=xxx nova_username=xxx nova_hostname=xxx |
1066 | - # quantum_database=xxx quantum_username=xxx quantum_hostname=xxx |
1067 | - # create |
1068 | - #{ |
1069 | - # "nova": { |
1070 | - # "username": xxx, |
1071 | - # "database": xxx, |
1072 | - # "hostname": xxx |
1073 | - # }, |
1074 | - # "quantum": { |
1075 | - # "username": xxx, |
1076 | - # "database": xxx, |
1077 | - # "hostname": xxx |
1078 | - # } |
1079 | - #} |
1080 | - # |
1081 | - databases = {} |
1082 | - for k, v in settings.iteritems(): |
1083 | - db = k.split('_')[0] |
1084 | - x = '_'.join(k.split('_')[1:]) |
1085 | - if db not in databases: |
1086 | - databases[db] = {} |
1087 | - databases[db][x] = v |
1088 | - return_data = {} |
1089 | - for db in databases: |
1090 | - if singleset.issubset(databases[db]): |
1091 | - return_data['_'.join([ db, 'password' ])] = \ |
1092 | - configure_db(databases[db]['hostname'], |
1093 | - databases[db]['database'], |
1094 | - databases[db]['username']) |
1095 | - relation_set(**return_data) |
1096 | - relation_set(db_host=local_hostname) |
1097 | - |
1098 | -hook = os.path.basename(sys.argv[0]) |
1099 | -hooks = { |
1100 | - "shared-db-relation-changed": shared_db_changed |
1101 | - } |
1102 | -try: |
1103 | - hook_func = hooks[hook] |
1104 | -except KeyError: |
1105 | - pass |
1106 | -else: |
1107 | - hook_func() |
1108 | - |
1109 | -sys.exit(0) |
1110 | |
1111 | === added file 'hooks/shared_db_relations.py' |
1112 | --- hooks/shared_db_relations.py 1970-01-01 00:00:00 +0000 |
1113 | +++ hooks/shared_db_relations.py 2013-03-26 14:36:21 +0000 |
1114 | @@ -0,0 +1,139 @@ |
1115 | +#!/usr/bin/python |
1116 | +# |
1117 | +# Create relations between a shared database to many peers. |
1118 | +# Join does nothing. Peer requests access to $DATABASE from $REMOTE_HOST. |
1119 | +# It's up to the hooks to ensure database exists, peer has access and |
1120 | +# clean up grants after a broken/departed peer (TODO) |
1121 | +# |
1122 | +# Author: Adam Gandelman <adam.gandelman@canonical.com> |
1123 | + |
1124 | + |
1125 | +from common import ( |
1126 | + database_exists, |
1127 | + create_database, |
1128 | + grant_exists, |
1129 | + create_grant |
1130 | + ) |
1131 | +import subprocess |
1132 | +import json |
1133 | +import socket |
1134 | +import os |
1135 | +import lib.utils as utils |
1136 | +import lib.cluster_utils as cluster |
1137 | + |
1138 | +LEADER_RES = 'res_mysql_vip' |
1139 | + |
1140 | + |
1141 | +def pwgen(): |
1142 | + return str(subprocess.check_output(['pwgen', '-s', '16'])).strip() |
1143 | + |
1144 | + |
1145 | +def relation_get(): |
1146 | + return json.loads(subprocess.check_output( |
1147 | + ['relation-get', |
1148 | + '--format', |
1149 | + 'json'] |
1150 | + ) |
1151 | + ) |
1152 | + |
1153 | + |
1154 | +def shared_db_changed(): |
1155 | + |
1156 | + def configure_db(hostname, |
1157 | + database, |
1158 | + username): |
1159 | + passwd_file = "/var/lib/mysql/mysql-{}.passwd"\ |
1160 | + .format(username) |
1161 | + if hostname != local_hostname: |
1162 | + remote_ip = socket.gethostbyname(hostname) |
1163 | + else: |
1164 | + remote_ip = '127.0.0.1' |
1165 | + |
1166 | + if not os.path.exists(passwd_file): |
1167 | + password = pwgen() |
1168 | + with open(passwd_file, 'w') as pfile: |
1169 | + pfile.write(password) |
1170 | + else: |
1171 | + with open(passwd_file) as pfile: |
1172 | + password = pfile.read().strip() |
1173 | + |
1174 | + if not database_exists(database): |
1175 | + create_database(database) |
1176 | + if not grant_exists(database, |
1177 | + username, |
1178 | + remote_ip): |
1179 | + create_grant(database, |
1180 | + username, |
1181 | + remote_ip, password) |
1182 | + return password |
1183 | + |
1184 | + if not cluster.eligible_leader(LEADER_RES): |
1185 | + utils.juju_log('INFO', |
1186 | + 'MySQL service is peered, bailing shared-db relation' |
1187 | + ' as this service unit is not the leader') |
1188 | + return |
1189 | + |
1190 | + settings = relation_get() |
1191 | + local_hostname = socket.getfqdn() |
1192 | + singleset = set([ |
1193 | + 'database', |
1194 | + 'username', |
1195 | + 'hostname' |
1196 | + ]) |
1197 | + |
1198 | + if singleset.issubset(settings): |
1199 | + # Process a single database configuration |
1200 | + password = configure_db(settings['hostname'], |
1201 | + settings['database'], |
1202 | + settings['username']) |
1203 | + if not cluster.is_clustered(): |
1204 | + utils.relation_set(db_host=local_hostname, |
1205 | + password=password) |
1206 | + else: |
1207 | + utils.relation_set(db_host=utils.config_get("vip"), |
1208 | + password=password) |
1209 | + |
1210 | + else: |
1211 | + # Process multiple database setup requests. |
1212 | + # from incoming relation data: |
1213 | + # nova_database=xxx nova_username=xxx nova_hostname=xxx |
1214 | + # quantum_database=xxx quantum_username=xxx quantum_hostname=xxx |
1215 | + # create |
1216 | + #{ |
1217 | + # "nova": { |
1218 | + # "username": xxx, |
1219 | + # "database": xxx, |
1220 | + # "hostname": xxx |
1221 | + # }, |
1222 | + # "quantum": { |
1223 | + # "username": xxx, |
1224 | + # "database": xxx, |
1225 | + # "hostname": xxx |
1226 | + # } |
1227 | + #} |
1228 | + # |
1229 | + databases = {} |
1230 | + for k, v in settings.iteritems(): |
1231 | + db = k.split('_')[0] |
1232 | + x = '_'.join(k.split('_')[1:]) |
1233 | + if db not in databases: |
1234 | + databases[db] = {} |
1235 | + databases[db][x] = v |
1236 | + return_data = {} |
1237 | + for db in databases: |
1238 | + if singleset.issubset(databases[db]): |
1239 | + return_data['_'.join([db, 'password'])] = \ |
1240 | + configure_db(databases[db]['hostname'], |
1241 | + databases[db]['database'], |
1242 | + databases[db]['username']) |
1243 | + utils.relation_set(**return_data) |
1244 | + if not cluster.is_clustered(): |
1245 | + utils.relation_set(db_host=local_hostname) |
1246 | + else: |
1247 | + utils.relation_set(db_host=utils.config_get("vip")) |
1248 | + |
1249 | +hooks = { |
1250 | + "shared-db-relation-changed": shared_db_changed |
1251 | + } |
1252 | + |
1253 | +utils.do_hooks(hooks) |
1254 | |
1255 | === modified file 'hooks/slave-relation-broken' |
1256 | --- hooks/slave-relation-broken 2011-12-06 21:23:39 +0000 |
1257 | +++ hooks/slave-relation-broken 2013-03-26 14:36:21 +0000 |
1258 | @@ -1,8 +1,8 @@ |
1259 | #!/bin/sh |
1260 | |
1261 | # Kill the replication |
1262 | -mysql -uroot -p`cat /var/lib/juju/mysql.passwd` -e 'STOP SLAVE;' |
1263 | -mysql -uroot -p`cat /var/lib/juju/mysql.passwd` -e 'RESET SLAVE;' |
1264 | +mysql -uroot -p`cat /var/lib/mysql/mysql.passwd` -e 'STOP SLAVE;' |
1265 | +mysql -uroot -p`cat /var/lib/mysql/mysql.passwd` -e 'RESET SLAVE;' |
1266 | # No longer a slave |
1267 | # XXX this may change the server-id .. not sure if thats what we |
1268 | # want! |
1269 | |
1270 | === modified file 'hooks/slave-relation-changed' |
1271 | --- hooks/slave-relation-changed 2011-12-06 21:17:17 +0000 |
1272 | +++ hooks/slave-relation-changed 2013-03-26 14:36:21 +0000 |
1273 | @@ -2,7 +2,7 @@ |
1274 | |
1275 | set -e |
1276 | |
1277 | -ROOTARGS="-uroot -p`cat /var/lib/juju/mysql.passwd`" |
1278 | +ROOTARGS="-uroot -p`cat /var/lib/mysql/mysql.passwd`" |
1279 | |
1280 | # Others can join that service but only the lowest will be the master |
1281 | # Note that we could be more automatic but for now we will wait for |
1282 | @@ -39,7 +39,7 @@ |
1283 | curl --silent --show-error $dumpurl |zcat| mysql $ROOTARGS |
1284 | # Root pw gets overwritten by import |
1285 | echo Re-setting Root Pasword -- can use ours because it hasnt been flushed |
1286 | -myrootpw=`cat /var/lib/juju/mysql.passwd` |
1287 | +myrootpw=`cat /var/lib/mysql/mysql.passwd` |
1288 | mysqladmin -uroot -p$myrootpw password $myrootpw |
1289 | # Debian packages expect debian-sys-maint@localhost to be root privileged and |
1290 | # configured in /etc/mysql/debian.cnf. we just broke that.. fix it |
1291 | |
1292 | === modified file 'hooks/upgrade-charm' |
1293 | --- hooks/upgrade-charm 2012-11-02 06:41:12 +0000 |
1294 | +++ hooks/upgrade-charm 2013-03-26 14:36:21 +0000 |
1295 | @@ -2,10 +2,26 @@ |
1296 | home=`dirname $0` |
1297 | # Remove any existing .service_user files, which will cause |
1298 | # new users/pw's to be generated, which is a good thing |
1299 | -old_service_user_files=$(ls /var/lib/juju/$.service_user) |
1300 | +old_service_user_files=$(ls /var/lib/juju/*.service_user) |
1301 | if [ -n "$old_service_user_files" ] ; then |
1302 | juju-log -l WARNING "Stale users left around, should be revoked: $(cat $old_service_user_files)" |
1303 | rm -f $old_service_user_files |
1304 | fi |
1305 | + |
1306 | +# Move service_user2 files to /var/lib/mysql as they are |
1307 | +# now stored there to support HA clustering with ceph. |
1308 | +new_service_user_files=$(ls /var/lib/juju/*.service_user2) |
1309 | +if [ -n "$new_service_user_files" ]; then |
1310 | + juju-log -l INFO "Moving service_user files [$new_service_user_files] to [/var/lib/mysql]" |
1311 | + mv $new_service_user_files /var/lib/mysql/ |
1312 | +fi |
1313 | +# Move passwd files to /var/lib/mysql as they are |
1314 | +# now stored there to support HA clustering with ceph. |
1315 | +password_files=$(ls /var/lib/juju/*.passwd) |
1316 | +if [ -n "$password_files" ]; then |
1317 | + juju-log -l INFO "Moving passwd files [$password_files] to [/var/lib/mysql]" |
1318 | + mv $password_files /var/lib/mysql/ |
1319 | +fi |
1320 | + |
1321 | $home/install |
1322 | exec $home/config-changed |
1323 | |
1324 | === modified file 'metadata.yaml' |
1325 | --- metadata.yaml 2012-08-21 00:07:19 +0000 |
1326 | +++ metadata.yaml 2013-03-26 14:36:21 +0000 |
1327 | @@ -22,6 +22,14 @@ |
1328 | local-monitors: |
1329 | interface: local-monitors |
1330 | scope: container |
1331 | +peers: |
1332 | + cluster: |
1333 | + interface: mysql-ha |
1334 | requires: |
1335 | slave: |
1336 | interface: mysql-oneway-replication |
1337 | + ceph: |
1338 | + interface: ceph-client |
1339 | + ha: |
1340 | + interface: hacluster |
1341 | + scope: container |
1342 | |
1343 | === modified file 'revision' |
1344 | --- revision 2012-11-29 16:31:14 +0000 |
1345 | +++ revision 2013-03-26 14:36:21 +0000 |
1346 | @@ -1,1 +1,1 @@ |
1347 | -165 |
1348 | +305 |
1349 | |
1350 | === added directory 'scripts' |
1351 | === added file 'scripts/add_to_cluster' |
1352 | --- scripts/add_to_cluster 1970-01-01 00:00:00 +0000 |
1353 | +++ scripts/add_to_cluster 2013-03-26 14:36:21 +0000 |
1354 | @@ -0,0 +1,2 @@ |
1355 | +#!/bin/bash |
1356 | +crm node online |
1357 | |
1358 | === added directory 'scripts/health_checks.d' |
1359 | === added file 'scripts/health_checks.d/service_crm_access' |
1360 | --- scripts/health_checks.d/service_crm_access 1970-01-01 00:00:00 +0000 |
1361 | +++ scripts/health_checks.d/service_crm_access 2013-03-26 14:36:21 +0000 |
1362 | @@ -0,0 +1,7 @@ |
1363 | +#!/bin/bash -e |
1364 | +# ensure this host is present in a CRM config and has access to crm tools. |
1365 | +# |
1366 | +# Since mysql service isn't running on the secondary/passive node, we have no |
1367 | +# other easy means for determining the health of the mysql service until this |
1368 | +# node becomes the active leader for the HA service. |
1369 | +crm status 2>/dev/null | grep -q `hostname` |
1370 | |
1371 | === added file 'scripts/remove_from_cluster' |
1372 | --- scripts/remove_from_cluster 1970-01-01 00:00:00 +0000 |
1373 | +++ scripts/remove_from_cluster 2013-03-26 14:36:21 +0000 |
1374 | @@ -0,0 +1,2 @@ |
1375 | +#!/bin/bash |
1376 | +crm node standby |
Chad Smith (chad.smith) wrote:
We only have a very simplistic health check in this branch because the mysql service is completely inoperable on the passive unit: the shared Ceph RBD filesystem is moved to the active leader of the HA service. Without the filesystem, we can't validate that mysql is in good health until this unit cycles back to being the active leader.
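
As a sketch of how the health_checks.d convention might be consumed: each check (like the service_crm_access script above) is an executable that exits non-zero when unhealthy. The runner below is hypothetical and not part of this branch:

  #!/bin/bash
  # Run every executable health check in the directory; report the first failure.
  checks_dir=scripts/health_checks.d
  for check in "$checks_dir"/*; do
      [ -x "$check" ] || continue
      if ! "$check"; then
          echo "FAIL: $check"
          exit 1
      fi
  done
  echo "PASS: all health checks succeeded"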