Merge lp:~gnuoy/charms/trusty/nova-compute/add-nrpe-checks into lp:~openstack-charmers-archive/charms/trusty/nova-compute/next

Proposed by Liam Young
Status: Merged
Merged at revision: 97
Proposed branch: lp:~gnuoy/charms/trusty/nova-compute/add-nrpe-checks
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-compute/next
Diff against target: 846 lines (+596/-19)
14 files modified
charm-helpers-hooks.yaml (+1/-0)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+308/-0)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+159/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+26/-12)
hooks/charmhelpers/contrib/network/ufw.py (+11/-0)
hooks/charmhelpers/contrib/openstack/context.py (+1/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+8/-2)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+2/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+6/-0)
hooks/charmhelpers/core/decorators.py (+41/-0)
hooks/charmhelpers/core/host.py (+7/-4)
hooks/charmhelpers/fetch/__init__.py (+8/-1)
hooks/nova_compute_hooks.py (+17/-0)
unit_tests/test_nova_compute_hooks.py (+1/-0)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/nova-compute/add-nrpe-checks
Reviewer Review Type Date Requested Status
Liam Young (community) Approve
Review via email: mp+246158@code.launchpad.net

Description of the change

Add nrpe support. Based on branch from bradm with a few tweaks

To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #667 nova-compute-next for gnuoy mp246158
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/667/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #696 nova-compute-next for gnuoy mp246158
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/696/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #852 nova-compute-next for gnuoy mp246158
    AMULET OK: passed

Build: http://10.245.162.77:8080/job/charm_amulet_test/852/

Revision history for this message
Liam Young (gnuoy) wrote :

<jamespage> gnuoy, as they are re-syncs + tweaks to the nrpe stuff in the charms, I'm happy to give a conditional +1 across the board based on osci checking things out OK
<gnuoy> jamespage, I'll take that! thanks
...
<gnuoy> jamespage, osci is still working through. But on the subject of those mps, does your +1 stand for branches with no amulet tests?
<jamespage> gnuoy, yes

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2014-12-11 14:51:25 +0000
3+++ charm-helpers-hooks.yaml 2015-01-12 14:02:09 +0000
4@@ -11,3 +11,4 @@
5 - contrib.network
6 - contrib.python.packages
7 - payload.execd
8+ - contrib.charmsupport
9
10=== added directory 'files'
11=== added directory 'hooks/charmhelpers/contrib/charmsupport'
12=== added file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
13=== added file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
14--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
15+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-01-12 14:02:09 +0000
16@@ -0,0 +1,308 @@
17+"""Compatibility with the nrpe-external-master charm"""
18+# Copyright 2012 Canonical Ltd.
19+#
20+# Authors:
21+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22+
23+import subprocess
24+import pwd
25+import grp
26+import os
27+import re
28+import shlex
29+import yaml
30+
31+from charmhelpers.core.hookenv import (
32+ config,
33+ local_unit,
34+ log,
35+ relation_ids,
36+ relation_set,
37+ relations_of_type,
38+)
39+
40+from charmhelpers.core.host import service
41+
42+# This module adds compatibility with the nrpe-external-master and plain nrpe
43+# subordinate charms. To use it in your charm:
44+#
45+# 1. Update metadata.yaml
46+#
47+# provides:
48+# (...)
49+# nrpe-external-master:
50+# interface: nrpe-external-master
51+# scope: container
52+#
53+# and/or
54+#
55+# provides:
56+# (...)
57+# local-monitors:
58+# interface: local-monitors
59+# scope: container
60+
61+#
62+# 2. Add the following to config.yaml
63+#
64+# nagios_context:
65+# default: "juju"
66+# type: string
67+# description: |
68+# Used by the nrpe subordinate charms.
69+# A string that will be prepended to instance name to set the host name
70+# in nagios. So for instance the hostname would be something like:
71+# juju-myservice-0
72+# If you're running multiple environments with the same services in them
73+# this allows you to differentiate between them.
74+# nagios_servicegroups:
75+# default: ""
76+# type: string
77+# description: |
78+# A comma-separated list of nagios servicegroups.
79+# If left empty, the nagios_context will be used as the servicegroup
80+#
81+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
82+#
83+# 4. Update your hooks.py with something like this:
84+#
85+# from charmsupport.nrpe import NRPE
86+# (...)
87+# def update_nrpe_config():
88+# nrpe_compat = NRPE()
89+# nrpe_compat.add_check(
90+# shortname = "myservice",
91+# description = "Check MyService",
92+# check_cmd = "check_http -w 2 -c 10 http://localhost"
93+# )
94+# nrpe_compat.add_check(
95+# "myservice_other",
96+# "Check for widget failures",
97+# check_cmd = "/srv/myapp/scripts/widget_check"
98+# )
99+# nrpe_compat.write()
100+#
101+# def config_changed():
102+# (...)
103+# update_nrpe_config()
104+#
105+# def nrpe_external_master_relation_changed():
106+# update_nrpe_config()
107+#
108+# def local_monitors_relation_changed():
109+# update_nrpe_config()
110+#
111+# 5. ln -s hooks.py nrpe-external-master-relation-changed
112+# ln -s hooks.py local-monitors-relation-changed
113+
114+
115+class CheckException(Exception):
116+ pass
117+
118+
119+class Check(object):
120+ shortname_re = '[A-Za-z0-9-_]+$'
121+ service_template = ("""
122+#---------------------------------------------------
123+# This file is Juju managed
124+#---------------------------------------------------
125+define service {{
126+ use active-service
127+ host_name {nagios_hostname}
128+ service_description {nagios_hostname}[{shortname}] """
129+ """{description}
130+ check_command check_nrpe!{command}
131+ servicegroups {nagios_servicegroup}
132+}}
133+""")
134+
135+ def __init__(self, shortname, description, check_cmd):
136+ super(Check, self).__init__()
137+ # XXX: could be better to calculate this from the service name
138+ if not re.match(self.shortname_re, shortname):
139+ raise CheckException("shortname must match {}".format(
140+ Check.shortname_re))
141+ self.shortname = shortname
142+ self.command = "check_{}".format(shortname)
143+ # Note: a set of invalid characters is defined by the
144+ # Nagios server config
145+ # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
146+ self.description = description
147+ self.check_cmd = self._locate_cmd(check_cmd)
148+
149+ def _locate_cmd(self, check_cmd):
150+ search_path = (
151+ '/usr/lib/nagios/plugins',
152+ '/usr/local/lib/nagios/plugins',
153+ )
154+ parts = shlex.split(check_cmd)
155+ for path in search_path:
156+ if os.path.exists(os.path.join(path, parts[0])):
157+ command = os.path.join(path, parts[0])
158+ if len(parts) > 1:
159+ command += " " + " ".join(parts[1:])
160+ return command
161+ log('Check command not found: {}'.format(parts[0]))
162+ return ''
163+
164+ def write(self, nagios_context, hostname, nagios_servicegroups=None):
165+ nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
166+ self.command)
167+ with open(nrpe_check_file, 'w') as nrpe_check_config:
168+ nrpe_check_config.write("# check {}\n".format(self.shortname))
169+ nrpe_check_config.write("command[{}]={}\n".format(
170+ self.command, self.check_cmd))
171+
172+ if not os.path.exists(NRPE.nagios_exportdir):
173+ log('Not writing service config as {} is not accessible'.format(
174+ NRPE.nagios_exportdir))
175+ else:
176+ self.write_service_config(nagios_context, hostname,
177+ nagios_servicegroups)
178+
179+ def write_service_config(self, nagios_context, hostname,
180+ nagios_servicegroups=None):
181+ for f in os.listdir(NRPE.nagios_exportdir):
182+ if re.search('.*{}.cfg'.format(self.command), f):
183+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
184+
185+ if not nagios_servicegroups:
186+ nagios_servicegroups = nagios_context
187+
188+ templ_vars = {
189+ 'nagios_hostname': hostname,
190+ 'nagios_servicegroup': nagios_servicegroups,
191+ 'description': self.description,
192+ 'shortname': self.shortname,
193+ 'command': self.command,
194+ }
195+ nrpe_service_text = Check.service_template.format(**templ_vars)
196+ nrpe_service_file = '{}/service__{}_{}.cfg'.format(
197+ NRPE.nagios_exportdir, hostname, self.command)
198+ with open(nrpe_service_file, 'w') as nrpe_service_config:
199+ nrpe_service_config.write(str(nrpe_service_text))
200+
201+ def run(self):
202+ subprocess.call(self.check_cmd)
203+
204+
205+class NRPE(object):
206+ nagios_logdir = '/var/log/nagios'
207+ nagios_exportdir = '/var/lib/nagios/export'
208+ nrpe_confdir = '/etc/nagios/nrpe.d'
209+
210+ def __init__(self, hostname=None):
211+ super(NRPE, self).__init__()
212+ self.config = config()
213+ self.nagios_context = self.config['nagios_context']
214+ if 'nagios_servicegroups' in self.config:
215+ self.nagios_servicegroups = self.config['nagios_servicegroups']
216+ else:
217+ self.nagios_servicegroups = 'juju'
218+ self.unit_name = local_unit().replace('/', '-')
219+ if hostname:
220+ self.hostname = hostname
221+ else:
222+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
223+ self.checks = []
224+
225+ def add_check(self, *args, **kwargs):
226+ self.checks.append(Check(*args, **kwargs))
227+
228+ def write(self):
229+ try:
230+ nagios_uid = pwd.getpwnam('nagios').pw_uid
231+ nagios_gid = grp.getgrnam('nagios').gr_gid
232+ except:
233+ log("Nagios user not set up, nrpe checks not updated")
234+ return
235+
236+ if not os.path.exists(NRPE.nagios_logdir):
237+ os.mkdir(NRPE.nagios_logdir)
238+ os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
239+
240+ nrpe_monitors = {}
241+ monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
242+ for nrpecheck in self.checks:
243+ nrpecheck.write(self.nagios_context, self.hostname,
244+ self.nagios_servicegroups)
245+ nrpe_monitors[nrpecheck.shortname] = {
246+ "command": nrpecheck.command,
247+ }
248+
249+ service('restart', 'nagios-nrpe-server')
250+
251+ for rid in relation_ids("local-monitors"):
252+ relation_set(relation_id=rid, monitors=yaml.dump(monitors))
253+
254+
255+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
256+ """
257+ Query relation with nrpe subordinate, return the nagios_host_context
258+
259+ :param str relation_name: Name of relation nrpe sub joined to
260+ """
261+ for rel in relations_of_type(relation_name):
262+ if 'nagios_hostname' in rel:
263+ return rel['nagios_host_context']
264+
265+
266+def get_nagios_hostname(relation_name='nrpe-external-master'):
267+ """
268+ Query relation with nrpe subordinate, return the nagios_hostname
269+
270+ :param str relation_name: Name of relation nrpe sub joined to
271+ """
272+ for rel in relations_of_type(relation_name):
273+ if 'nagios_hostname' in rel:
274+ return rel['nagios_hostname']
275+
276+
277+def get_nagios_unit_name(relation_name='nrpe-external-master'):
278+ """
279+ Return the nagios unit name prepended with host_context if needed
280+
281+ :param str relation_name: Name of relation nrpe sub joined to
282+ """
283+ host_context = get_nagios_hostcontext(relation_name)
284+ if host_context:
285+ unit = "%s:%s" % (host_context, local_unit())
286+ else:
287+ unit = local_unit()
288+ return unit
289+
290+
291+def add_init_service_checks(nrpe, services, unit_name):
292+ """
293+ Add checks for each service in list
294+
295+ :param NRPE nrpe: NRPE object to add check to
296+ :param list services: List of services to check
297+ :param str unit_name: Unit name to use in check description
298+ """
299+ for svc in services:
300+ upstart_init = '/etc/init/%s.conf' % svc
301+ sysv_init = '/etc/init.d/%s' % svc
302+ if os.path.exists(upstart_init):
303+ nrpe.add_check(
304+ shortname=svc,
305+ description='process check {%s}' % unit_name,
306+ check_cmd='check_upstart_job %s' % svc
307+ )
308+ elif os.path.exists(sysv_init):
309+ cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
310+ cron_file = ('*/5 * * * * root '
311+ '/usr/local/lib/nagios/plugins/check_exit_status.pl '
312+ '-s /etc/init.d/%s status > '
313+ '/var/lib/nagios/service-check-%s.txt\n' % (svc,
314+ svc)
315+ )
316+ f = open(cronpath, 'w')
317+ f.write(cron_file)
318+ f.close()
319+ nrpe.add_check(
320+ shortname=svc,
321+ description='process check {%s}' % unit_name,
322+ check_cmd='check_status_file.py -f '
323+ '/var/lib/nagios/service-check-%s.txt' % svc,
324+ )
325
326=== added file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
327--- hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
328+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 2015-01-12 14:02:09 +0000
329@@ -0,0 +1,159 @@
330+'''
331+Functions for managing volumes in juju units. One volume is supported per unit.
332+Subordinates may have their own storage, provided it is on its own partition.
333+
334+Configuration stanzas::
335+
336+ volume-ephemeral:
337+ type: boolean
338+ default: true
339+ description: >
340+      If false, a volume is mounted as specified in "volume-map"
341+ If true, ephemeral storage will be used, meaning that log data
342+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
343+ volume-map:
344+ type: string
345+ default: {}
346+ description: >
347+ YAML map of units to device names, e.g:
348+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
349+ Service units will raise a configure-error if volume-ephemeral
350+ is 'true' and no volume-map value is set. Use 'juju set' to set a
351+ value and 'juju resolved' to complete configuration.
352+
353+Usage::
354+
355+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
356+ from charmsupport.hookenv import log, ERROR
357+    def pre_mount_hook():
358+ stop_service('myservice')
359+ def post_mount_hook():
360+ start_service('myservice')
361+
362+ if __name__ == '__main__':
363+ try:
364+ configure_volume(before_change=pre_mount_hook,
365+ after_change=post_mount_hook)
366+ except VolumeConfigurationError:
367+ log('Storage could not be configured', ERROR)
368+
369+'''
370+
371+# XXX: Known limitations
372+# - fstab is neither consulted nor updated
373+
374+import os
375+from charmhelpers.core import hookenv
376+from charmhelpers.core import host
377+import yaml
378+
379+
380+MOUNT_BASE = '/srv/juju/volumes'
381+
382+
383+class VolumeConfigurationError(Exception):
384+ '''Volume configuration data is missing or invalid'''
385+ pass
386+
387+
388+def get_config():
389+ '''Gather and sanity-check volume configuration data'''
390+ volume_config = {}
391+ config = hookenv.config()
392+
393+ errors = False
394+
395+ if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
396+ volume_config['ephemeral'] = True
397+ else:
398+ volume_config['ephemeral'] = False
399+
400+ try:
401+ volume_map = yaml.safe_load(config.get('volume-map', '{}'))
402+ except yaml.YAMLError as e:
403+ hookenv.log("Error parsing YAML volume-map: {}".format(e),
404+ hookenv.ERROR)
405+ errors = True
406+ if volume_map is None:
407+ # probably an empty string
408+ volume_map = {}
409+ elif not isinstance(volume_map, dict):
410+ hookenv.log("Volume-map should be a dictionary, not {}".format(
411+ type(volume_map)))
412+ errors = True
413+
414+ volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
415+ if volume_config['device'] and volume_config['ephemeral']:
416+ # asked for ephemeral storage but also defined a volume ID
417+ hookenv.log('A volume is defined for this unit, but ephemeral '
418+ 'storage was requested', hookenv.ERROR)
419+ errors = True
420+ elif not volume_config['device'] and not volume_config['ephemeral']:
421+ # asked for permanent storage but did not define volume ID
422+ hookenv.log('Ephemeral storage was requested, but there is no volume '
423+ 'defined for this unit.', hookenv.ERROR)
424+ errors = True
425+
426+ unit_mount_name = hookenv.local_unit().replace('/', '-')
427+ volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
428+
429+ if errors:
430+ return None
431+ return volume_config
432+
433+
434+def mount_volume(config):
435+ if os.path.exists(config['mountpoint']):
436+ if not os.path.isdir(config['mountpoint']):
437+ hookenv.log('Not a directory: {}'.format(config['mountpoint']))
438+ raise VolumeConfigurationError()
439+ else:
440+ host.mkdir(config['mountpoint'])
441+ if os.path.ismount(config['mountpoint']):
442+ unmount_volume(config)
443+ if not host.mount(config['device'], config['mountpoint'], persist=True):
444+ raise VolumeConfigurationError()
445+
446+
447+def unmount_volume(config):
448+ if os.path.ismount(config['mountpoint']):
449+ if not host.umount(config['mountpoint'], persist=True):
450+ raise VolumeConfigurationError()
451+
452+
453+def managed_mounts():
454+ '''List of all mounted managed volumes'''
455+ return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
456+
457+
458+def configure_volume(before_change=lambda: None, after_change=lambda: None):
459+ '''Set up storage (or don't) according to the charm's volume configuration.
460+ Returns the mount point or "ephemeral". before_change and after_change
461+ are optional functions to be called if the volume configuration changes.
462+ '''
463+
464+ config = get_config()
465+ if not config:
466+ hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
467+ raise VolumeConfigurationError()
468+
469+ if config['ephemeral']:
470+ if os.path.ismount(config['mountpoint']):
471+ before_change()
472+ unmount_volume(config)
473+ after_change()
474+ return 'ephemeral'
475+ else:
476+ # persistent storage
477+ if os.path.ismount(config['mountpoint']):
478+ mounts = dict(managed_mounts())
479+ if mounts.get(config['mountpoint']) != config['device']:
480+ before_change()
481+ unmount_volume(config)
482+ mount_volume(config)
483+ after_change()
484+ else:
485+ before_change()
486+ mount_volume(config)
487+ after_change()
488+ return config['mountpoint']
489
490=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
491--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-12-15 09:10:57 +0000
492+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-01-12 14:02:09 +0000
493@@ -13,6 +13,7 @@
494
495 import subprocess
496 import os
497+
498 from socket import gethostname as get_unit_hostname
499
500 import six
501@@ -28,12 +29,19 @@
502 WARNING,
503 unit_get,
504 )
505+from charmhelpers.core.decorators import (
506+ retry_on_exception,
507+)
508
509
510 class HAIncompleteConfig(Exception):
511 pass
512
513
514+class CRMResourceNotFound(Exception):
515+ pass
516+
517+
518 def is_elected_leader(resource):
519 """
520 Returns True if the charm executing this is the elected cluster leader.
521@@ -68,24 +76,30 @@
522 return False
523
524
525-def is_crm_leader(resource):
526+@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
527+def is_crm_leader(resource, retry=False):
528 """
529 Returns True if the charm calling this is the elected corosync leader,
530 as returned by calling the external "crm" command.
531+
532+ We allow this operation to be retried to avoid the possibility of getting a
533+ false negative. See LP #1396246 for more info.
534 """
535- cmd = [
536- "crm", "resource",
537- "show", resource
538- ]
539+ cmd = ['crm', 'resource', 'show', resource]
540 try:
541- status = subprocess.check_output(cmd).decode('UTF-8')
542+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
543+ if not isinstance(status, six.text_type):
544+ status = six.text_type(status, "utf-8")
545 except subprocess.CalledProcessError:
546- return False
547- else:
548- if get_unit_hostname() in status:
549- return True
550- else:
551- return False
552+ status = None
553+
554+ if status and get_unit_hostname() in status:
555+ return True
556+
557+ if status and "resource %s is NOT running" % (resource) in status:
558+ raise CRMResourceNotFound("CRM resource %s not found" % (resource))
559+
560+ return False
561
562
563 def is_leader(resource):
564
565=== modified file 'hooks/charmhelpers/contrib/network/ufw.py'
566--- hooks/charmhelpers/contrib/network/ufw.py 2014-12-10 20:28:56 +0000
567+++ hooks/charmhelpers/contrib/network/ufw.py 2015-01-12 14:02:09 +0000
568@@ -54,6 +54,17 @@
569 if is_enabled():
570 return True
571
572+ if not os.path.isdir('/proc/sys/net/ipv6'):
573+ # disable IPv6 support in ufw
574+ hookenv.log("This machine doesn't have IPv6 enabled", level="INFO")
575+ exit_code = subprocess.call(['sed', '-i', 's/IPV6=yes/IPV6=no/g',
576+ '/etc/default/ufw'])
577+ if exit_code == 0:
578+ hookenv.log('IPv6 support in ufw disabled', level='INFO')
579+ else:
580+ hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
581+ raise Exception("Couldn't disable IPv6 support in ufw")
582+
583 output = subprocess.check_output(['ufw', 'enable'],
584 env={'LANG': 'en_US',
585 'PATH': os.environ['PATH']})
586
587=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
588--- hooks/charmhelpers/contrib/openstack/context.py 2014-12-15 10:28:47 +0000
589+++ hooks/charmhelpers/contrib/openstack/context.py 2015-01-12 14:02:09 +0000
590@@ -491,6 +491,7 @@
591 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
592
593 if config('prefer-ipv6'):
594+ ctxt['ipv6'] = True
595 ctxt['local_host'] = 'ip6-localhost'
596 ctxt['haproxy_host'] = '::'
597 ctxt['stat_port'] = ':::8888'
598
599=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
600--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-12-15 10:28:47 +0000
601+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-01-12 14:02:09 +0000
602@@ -152,9 +152,15 @@
603 database=config('neutron-database'),
604 relation_prefix='neutron',
605 ssl_dir=NEUTRON_CONF_DIR)],
606- 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
607+ 'services': ['calico-felix',
608+ 'bird',
609+ 'neutron-dhcp-agent',
610+ 'nova-api-metadata'],
611 'packages': [[headers_package()] + determine_dkms_package(),
612- ['calico-compute', 'bird', 'neutron-dhcp-agent']],
613+ ['calico-compute',
614+ 'bird',
615+ 'neutron-dhcp-agent',
616+ 'nova-api-metadata']],
617 'server_packages': ['neutron-server', 'calico-control'],
618 'server_services': ['neutron-server']
619 }
620
621=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
622--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-12-15 09:10:57 +0000
623+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-01-12 14:02:09 +0000
624@@ -38,7 +38,9 @@
625 {% for service, ports in service_ports.items() -%}
626 frontend tcp-in_{{ service }}
627 bind *:{{ ports[0] }}
628+ {% if ipv6 -%}
629 bind :::{{ ports[0] }}
630+ {% endif -%}
631 {% for frontend in frontends -%}
632 acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
633 use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
634
635=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
636--- hooks/charmhelpers/contrib/openstack/utils.py 2014-12-15 10:28:47 +0000
637+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-01-12 14:02:09 +0000
638@@ -53,6 +53,7 @@
639 ('saucy', 'havana'),
640 ('trusty', 'icehouse'),
641 ('utopic', 'juno'),
642+ ('vivid', 'kilo'),
643 ])
644
645
646@@ -64,6 +65,7 @@
647 ('2013.2', 'havana'),
648 ('2014.1', 'icehouse'),
649 ('2014.2', 'juno'),
650+ ('2015.1', 'kilo'),
651 ])
652
653 # The ugly duckling
654@@ -84,6 +86,7 @@
655 ('2.0.0', 'juno'),
656 ('2.1.0', 'juno'),
657 ('2.2.0', 'juno'),
658+ ('2.2.1', 'kilo'),
659 ])
660
661 DEFAULT_LOOPBACK_SIZE = '5G'
662@@ -289,6 +292,9 @@
663 'juno': 'trusty-updates/juno',
664 'juno/updates': 'trusty-updates/juno',
665 'juno/proposed': 'trusty-proposed/juno',
666+ 'kilo': 'trusty-updates/kilo',
667+ 'kilo/updates': 'trusty-updates/kilo',
668+ 'kilo/proposed': 'trusty-proposed/kilo',
669 }
670
671 try:
672
673=== added file 'hooks/charmhelpers/core/decorators.py'
674--- hooks/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000
675+++ hooks/charmhelpers/core/decorators.py 2015-01-12 14:02:09 +0000
676@@ -0,0 +1,41 @@
677+#
678+# Copyright 2014 Canonical Ltd.
679+#
680+# Authors:
681+# Edward Hope-Morley <opentastic@gmail.com>
682+#
683+
684+import time
685+
686+from charmhelpers.core.hookenv import (
687+ log,
688+ INFO,
689+)
690+
691+
692+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
693+ """If the decorated function raises exception exc_type, allow num_retries
694+    retry attempts before raising the exception.
695+ """
696+ def _retry_on_exception_inner_1(f):
697+ def _retry_on_exception_inner_2(*args, **kwargs):
698+ retries = num_retries
699+ multiplier = 1
700+ while True:
701+ try:
702+ return f(*args, **kwargs)
703+ except exc_type:
704+ if not retries:
705+ raise
706+
707+ delay = base_delay * multiplier
708+ multiplier += 1
709+ log("Retrying '%s' %d more times (delay=%s)" %
710+ (f.__name__, retries, delay), level=INFO)
711+ retries -= 1
712+ if delay:
713+ time.sleep(delay)
714+
715+ return _retry_on_exception_inner_2
716+
717+ return _retry_on_exception_inner_1
718
719=== modified file 'hooks/charmhelpers/core/host.py'
720--- hooks/charmhelpers/core/host.py 2014-12-15 10:28:47 +0000
721+++ hooks/charmhelpers/core/host.py 2015-01-12 14:02:09 +0000
722@@ -162,13 +162,16 @@
723 uid = pwd.getpwnam(owner).pw_uid
724 gid = grp.getgrnam(group).gr_gid
725 realpath = os.path.abspath(path)
726- if os.path.exists(realpath):
727- if force and not os.path.isdir(realpath):
728+ path_exists = os.path.exists(realpath)
729+ if path_exists and force:
730+ if not os.path.isdir(realpath):
731 log("Removing non-directory file {} prior to mkdir()".format(path))
732 os.unlink(realpath)
733- else:
734+ os.makedirs(realpath, perms)
735+ os.chown(realpath, uid, gid)
736+ elif not path_exists:
737 os.makedirs(realpath, perms)
738- os.chown(realpath, uid, gid)
739+ os.chown(realpath, uid, gid)
740
741
742 def write_file(path, content, owner='root', group='root', perms=0o444):
743
744=== modified file 'hooks/charmhelpers/fetch/__init__.py'
745--- hooks/charmhelpers/fetch/__init__.py 2014-12-15 10:28:47 +0000
746+++ hooks/charmhelpers/fetch/__init__.py 2015-01-12 14:02:09 +0000
747@@ -64,9 +64,16 @@
748 'trusty-juno/updates': 'trusty-updates/juno',
749 'trusty-updates/juno': 'trusty-updates/juno',
750 'juno/proposed': 'trusty-proposed/juno',
751- 'juno/proposed': 'trusty-proposed/juno',
752 'trusty-juno/proposed': 'trusty-proposed/juno',
753 'trusty-proposed/juno': 'trusty-proposed/juno',
754+ # Kilo
755+ 'kilo': 'trusty-updates/kilo',
756+ 'trusty-kilo': 'trusty-updates/kilo',
757+ 'trusty-kilo/updates': 'trusty-updates/kilo',
758+ 'trusty-updates/kilo': 'trusty-updates/kilo',
759+ 'kilo/proposed': 'trusty-proposed/kilo',
760+ 'trusty-kilo/proposed': 'trusty-proposed/kilo',
761+ 'trusty-proposed/kilo': 'trusty-proposed/kilo',
762 }
763
764 # The order of this list is very important. Handlers should be listed in from
765
766=== added symlink 'hooks/compute-peer-relation-joined'
767=== target is u'nova_compute_hooks.py'
768=== added symlink 'hooks/neutron-plugin-relation-changed'
769=== target is u'nova_compute_hooks.py'
770=== added symlink 'hooks/neutron-plugin-relation-departed'
771=== target is u'nova_compute_hooks.py'
772=== added symlink 'hooks/neutron-plugin-relation-joined'
773=== target is u'nova_compute_hooks.py'
774=== modified file 'hooks/nova_compute_hooks.py'
775--- hooks/nova_compute_hooks.py 2014-12-15 10:28:47 +0000
776+++ hooks/nova_compute_hooks.py 2015-01-12 14:02:09 +0000
777@@ -48,6 +48,7 @@
778 do_openstack_upgrade,
779 public_ssh_key,
780 restart_map,
781+ services,
782 register_configs,
783 NOVA_CONF,
784 QUANTUM_CONF, NEUTRON_CONF,
785@@ -65,6 +66,7 @@
786 CEPH_SECRET_UUID,
787 assert_libvirt_imagebackend_allowed
788 )
789+from charmhelpers.contrib.charmsupport import nrpe
790 from charmhelpers.core.sysctl import create as create_sysctl
791
792 from socket import gethostname
793@@ -114,6 +116,8 @@
794
795 [compute_joined(rid) for rid in relation_ids('cloud-compute')]
796
797+ update_nrpe_config()
798+
799 CONFIGS.write_all()
800
801
802@@ -292,6 +296,7 @@
803 def upgrade_charm():
804 for r_id in relation_ids('amqp'):
805 amqp_joined(relation_id=r_id)
806+ update_nrpe_config()
807
808
809 @hooks.hook('nova-ceilometer-relation-changed')
810@@ -300,6 +305,18 @@
811 CONFIGS.write_all()
812
813
814+@hooks.hook('nrpe-external-master-relation-joined',
815+ 'nrpe-external-master-relation-changed')
816+def update_nrpe_config():
817+ # python-dbus is used by check_upstart_job
818+ apt_install('python-dbus')
819+ hostname = nrpe.get_nagios_hostname()
820+ current_unit = nrpe.get_nagios_unit_name()
821+ nrpe_setup = nrpe.NRPE(hostname=hostname)
822+ nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
823+ nrpe_setup.write()
824+
825+
826 def main():
827 try:
828 hooks.execute(sys.argv)
829
830=== added symlink 'hooks/nrpe-external-master-relation-changed'
831=== target is u'nova_compute_hooks.py'
832=== added symlink 'hooks/start'
833=== target is u'nova_compute_hooks.py'
834=== added symlink 'hooks/stop'
835=== target is u'nova_compute_hooks.py'
836=== modified file 'unit_tests/test_nova_compute_hooks.py'
837--- unit_tests/test_nova_compute_hooks.py 2014-12-15 10:28:47 +0000
838+++ unit_tests/test_nova_compute_hooks.py 2015-01-12 14:02:09 +0000
839@@ -46,6 +46,7 @@
840 'register_configs',
841 'disable_shell',
842 'enable_shell',
843+ 'update_nrpe_config',
844 # misc_utils
845 'ensure_ceph_keyring',
846 'execd_preinstall',

Subscribers

People subscribed via source and target branches