Merge ~jacekn/squid-reverseproxy-charm:master into squid-reverseproxy-charm:master

Proposed by Jacek Nykis
Status: Merged
Approved by: David Lawson
Approved revision: fef2287b380a554e7dabad17283cf6bbf6f9ac9d
Merged at revision: f6087f897ba0f06a25b80391a92994c28db36457
Proposed branch: ~jacekn/squid-reverseproxy-charm:master
Merge into: squid-reverseproxy-charm:master
Diff against target: 7572 lines (+6069/-434)
43 files modified
charm-helpers.yaml (+2/-1)
config.yaml (+1/-1)
hooks/charmhelpers/__init__.py (+97/-0)
hooks/charmhelpers/contrib/__init__.py (+13/-0)
hooks/charmhelpers/contrib/charmsupport/__init__.py (+13/-0)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+253/-21)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+19/-2)
hooks/charmhelpers/core/__init__.py (+13/-0)
hooks/charmhelpers/core/decorators.py (+55/-0)
hooks/charmhelpers/core/files.py (+43/-0)
hooks/charmhelpers/core/fstab.py (+132/-0)
hooks/charmhelpers/core/hookenv.py (+1045/-62)
hooks/charmhelpers/core/host.py (+888/-85)
hooks/charmhelpers/core/host_factory/__init__.py (+0/-0)
hooks/charmhelpers/core/host_factory/centos.py (+72/-0)
hooks/charmhelpers/core/host_factory/ubuntu.py (+90/-0)
hooks/charmhelpers/core/hugepage.py (+69/-0)
hooks/charmhelpers/core/kernel.py (+72/-0)
hooks/charmhelpers/core/kernel_factory/__init__.py (+0/-0)
hooks/charmhelpers/core/kernel_factory/centos.py (+17/-0)
hooks/charmhelpers/core/kernel_factory/ubuntu.py (+13/-0)
hooks/charmhelpers/core/services/__init__.py (+16/-0)
hooks/charmhelpers/core/services/base.py (+362/-0)
hooks/charmhelpers/core/services/helpers.py (+290/-0)
hooks/charmhelpers/core/strutils.py (+129/-0)
hooks/charmhelpers/core/sysctl.py (+58/-0)
hooks/charmhelpers/core/templating.py (+93/-0)
hooks/charmhelpers/core/unitdata.py (+525/-0)
hooks/charmhelpers/fetch/__init__.py (+145/-149)
hooks/charmhelpers/fetch/archiveurl.py (+126/-9)
hooks/charmhelpers/fetch/bzrurl.py (+53/-21)
hooks/charmhelpers/fetch/centos.py (+171/-0)
hooks/charmhelpers/fetch/giturl.py (+69/-0)
hooks/charmhelpers/fetch/snap.py (+150/-0)
hooks/charmhelpers/fetch/ubuntu.py (+592/-0)
hooks/charmhelpers/osplatform.py (+25/-0)
hooks/hooks.py (+41/-20)
hooks/install (+3/-0)
hooks/tests/test_helpers.py (+32/-21)
hooks/tests/test_nrpe_hooks.py (+20/-39)
metadata.yaml (+1/-0)
scripts/charm_helpers_sync.py (+258/-0)
templates/main_config.template (+3/-3)
Reviewer: Squid Reverse Proxy Charmers (status: Pending)
Review via email: mp+353237@code.launchpad.net

Commit message

Update charm to support bionic

🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:

This merge proposal is being monitored by mergebot. Change the status to Approved to merge.

🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:

Change successfully merged at revision f6087f897ba0f06a25b80391a92994c28db36457

Preview Diff

diff --git a/charm-helpers.yaml b/charm-helpers.yaml
index ffd6ac8..622437b 100644
--- a/charm-helpers.yaml
+++ b/charm-helpers.yaml
@@ -1,4 +1,5 @@
 include:
     - core
     - fetch
-    - contrib.charmsupport
\ No newline at end of file
+    - osplatform
+    - contrib.charmsupport
diff --git a/config.yaml b/config.yaml
index fc56126..8f98d85 100644
--- a/config.yaml
+++ b/config.yaml
@@ -59,7 +59,7 @@ options:
     description: Maximum size of the on-disk object cache (MB). Set to zero to disable disk caching.
   cache_dir:
     type: string
-    default: '/var/spool/squid3'
+    default: ''
     description: The top-level directory where cache swap files will be stored.
   target_objs_per_dir:
     type: int
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index e69de29..e7aa471 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -0,0 +1,97 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15# Bootstrap charm-helpers, installing its dependencies if necessary using
16# only standard libraries.
17from __future__ import print_function
18from __future__ import absolute_import
19
20import functools
21import inspect
22import subprocess
23import sys
24
25try:
26 import six # flake8: noqa
27except ImportError:
28 if sys.version_info.major == 2:
29 subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
30 else:
31 subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
32 import six # flake8: noqa
33
34try:
35 import yaml # flake8: noqa
36except ImportError:
37 if sys.version_info.major == 2:
38 subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
39 else:
40 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
41 import yaml # flake8: noqa
42
43
44# Holds a list of mapping of mangled function names that have been deprecated
45# using the @deprecate decorator below. This is so that the warning is only
46# printed once for each usage of the function.
47__deprecated_functions = {}
48
49
50def deprecate(warning, date=None, log=None):
51 """Add a deprecation warning the first time the function is used.
52 The date, which is a string in semi-ISO8660 format indicate the year-month
53 that the function is officially going to be removed.
54
55 usage:
56
57 @deprecate('use core/fetch/add_source() instead', '2017-04')
58 def contributed_add_source_thing(...):
59 ...
60
61 And it then prints to the log ONCE that the function is deprecated.
62 The reason for passing the logging function (log) is so that hookenv.log
63 can be used for a charm if needed.
64
65 :param warning: String to indicat where it has moved ot.
66 :param date: optional sting, in YYYY-MM format to indicate when the
67 function will definitely (probably) be removed.
68 :param log: The log function to call to log. If not, logs to stdout
69 """
70 def wrap(f):
71
72 @functools.wraps(f)
73 def wrapped_f(*args, **kwargs):
74 try:
75 module = inspect.getmodule(f)
76 file = inspect.getsourcefile(f)
77 lines = inspect.getsourcelines(f)
78 f_name = "{}-{}-{}..{}-{}".format(
79 module.__name__, file, lines[0], lines[-1], f.__name__)
80 except (IOError, TypeError):
81 # assume it was local, so just use the name of the function
82 f_name = f.__name__
83 if f_name not in __deprecated_functions:
84 __deprecated_functions[f_name] = True
85 s = "DEPRECATION WARNING: Function {} is being removed".format(
86 f.__name__)
87 if date:
88 s = "{} on/around {}".format(s, date)
89 if warning:
90 s = "{} : {}".format(s, warning)
91 if log:
92 log(s)
93 else:
94 print(s)
95 return f(*args, **kwargs)
96 return wrapped_f
97 return wrap
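
The bootstrapped charmhelpers package above now ships a @deprecate decorator. As a rough usage sketch only (the cache-related function names below are invented for illustration and are not part of this charm):

from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


def configure_cache(size_mb):
    # stand-in for a real helper in the charm
    return size_mb


@deprecate('use configure_cache() instead', date='2018-10', log=log)
def old_configure_cache(size_mb):
    # emits a one-time "DEPRECATION WARNING" line via the supplied log
    # function, then delegates to the replacement helper
    return configure_cache(size_mb)
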
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
index e69de29..d7567b8 100644
--- a/hooks/charmhelpers/contrib/__init__.py
+++ b/hooks/charmhelpers/contrib/__init__.py
@@ -0,0 +1,13 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
index e69de29..d7567b8 100644
--- a/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ b/hooks/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,13 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index f3bfe3f..e3d10c1 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -1,3 +1,17 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 """Compatibility with the nrpe-external-master charm"""
 # Copyright 2012 Canonical Ltd.
 #
@@ -8,19 +22,24 @@ import subprocess
 import pwd
 import grp
 import os
+import glob
+import shutil
 import re
 import shlex
 import yaml
 
 from charmhelpers.core.hookenv import (
     config,
+    hook_name,
     local_unit,
     log,
     relation_ids,
     relation_set,
+    relations_of_type,
 )
 
 from charmhelpers.core.host import service
+from charmhelpers.core import host
 
 # This module adds compatibility with the nrpe-external-master and plain nrpe
 # subordinate charms. To use it in your charm:
@@ -54,6 +73,12 @@ from charmhelpers.core.host import service
 # juju-myservice-0
 # If you're running multiple environments with the same services in them
 # this allows you to differentiate between them.
+# nagios_servicegroups:
+#   default: ""
+#   type: string
+#   description: |
+#     A comma-separated list of nagios servicegroups.
+#     If left empty, the nagios_context will be used as the servicegroup
 #
58# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
59#84#
@@ -85,6 +110,13 @@ from charmhelpers.core.host import service
85# def local_monitors_relation_changed():110# def local_monitors_relation_changed():
86# update_nrpe_config()111# update_nrpe_config()
87#112#
113# 4.a If your charm is a subordinate charm set primary=False
114#
115# from charmsupport.nrpe import NRPE
116# (...)
117# def update_nrpe_config():
118# nrpe_compat = NRPE(primary=False)
119#
88# 5. ln -s hooks.py nrpe-external-master-relation-changed120# 5. ln -s hooks.py nrpe-external-master-relation-changed
89# ln -s hooks.py local-monitors-relation-changed121# ln -s hooks.py local-monitors-relation-changed
90122
@@ -94,7 +126,7 @@ class CheckException(Exception):
 
 
 class Check(object):
-    shortname_re = '[A-Za-z0-9-_]+$'
+    shortname_re = '[A-Za-z0-9-_.]+$'
     service_template = ("""
 #---------------------------------------------------
 # This file is Juju managed
@@ -123,12 +155,17 @@ define service {{
         self.description = description
         self.check_cmd = self._locate_cmd(check_cmd)
 
+    def _get_check_filename(self):
+        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+    def _get_service_filename(self, hostname):
+        return os.path.join(NRPE.nagios_exportdir,
+                            'service__{}_{}.cfg'.format(hostname, self.command))
+
     def _locate_cmd(self, check_cmd):
         search_path = (
-            '/',
-            os.path.join(os.environ['CHARM_DIR'],
-                         'files/nrpe-external-master'),
             '/usr/lib/nagios/plugins',
+            '/usr/local/lib/nagios/plugins',
         )
         parts = shlex.split(check_cmd)
         for path in search_path:
@@ -140,11 +177,30 @@ define service {{
         log('Check command not found: {}'.format(parts[0]))
         return ''
 
-    def write(self, nagios_context, hostname):
-        nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
-            self.command)
+    def _remove_service_files(self):
+        if not os.path.exists(NRPE.nagios_exportdir):
+            return
+        for f in os.listdir(NRPE.nagios_exportdir):
+            if f.endswith('_{}.cfg'.format(self.command)):
+                os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+    def remove(self, hostname):
+        nrpe_check_file = self._get_check_filename()
+        if os.path.exists(nrpe_check_file):
+            os.remove(nrpe_check_file)
+        self._remove_service_files()
+
+    def write(self, nagios_context, hostname, nagios_servicegroups):
+        nrpe_check_file = self._get_check_filename()
         with open(nrpe_check_file, 'w') as nrpe_check_config:
             nrpe_check_config.write("# check {}\n".format(self.shortname))
+            if nagios_servicegroups:
+                nrpe_check_config.write(
+                    "# The following header was added automatically by juju\n")
+                nrpe_check_config.write(
+                    "# Modifying it will affect nagios monitoring and alerting\n")
+                nrpe_check_config.write(
+                    "# servicegroups: {}\n".format(nagios_servicegroups))
             nrpe_check_config.write("command[{}]={}\n".format(
                 self.command, self.check_cmd))
 
@@ -152,23 +208,22 @@ define service {{
             log('Not writing service config as {} is not accessible'.format(
                 NRPE.nagios_exportdir))
         else:
-            self.write_service_config(nagios_context, hostname)
+            self.write_service_config(nagios_context, hostname,
+                                      nagios_servicegroups)
 
-    def write_service_config(self, nagios_context, hostname):
-        for f in os.listdir(NRPE.nagios_exportdir):
-            if re.search('.*{}.cfg'.format(self.command), f):
-                os.remove(os.path.join(NRPE.nagios_exportdir, f))
+    def write_service_config(self, nagios_context, hostname,
+                             nagios_servicegroups):
+        self._remove_service_files()
 
         templ_vars = {
             'nagios_hostname': hostname,
-            'nagios_servicegroup': nagios_context,
+            'nagios_servicegroup': nagios_servicegroups,
             'description': self.description,
             'shortname': self.shortname,
             'command': self.command,
         }
         nrpe_service_text = Check.service_template.format(**templ_vars)
-        nrpe_service_file = '{}/service__{}_{}.cfg'.format(
-            NRPE.nagios_exportdir, hostname, self.command)
+        nrpe_service_file = self._get_service_filename(hostname)
         with open(nrpe_service_file, 'w') as nrpe_service_config:
             nrpe_service_config.write(str(nrpe_service_text))
 
@@ -180,23 +235,58 @@ class NRPE(object):
180 nagios_logdir = '/var/log/nagios'235 nagios_logdir = '/var/log/nagios'
181 nagios_exportdir = '/var/lib/nagios/export'236 nagios_exportdir = '/var/lib/nagios/export'
182 nrpe_confdir = '/etc/nagios/nrpe.d'237 nrpe_confdir = '/etc/nagios/nrpe.d'
238 homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
183239
184 def __init__(self):240 def __init__(self, hostname=None, primary=True):
185 super(NRPE, self).__init__()241 super(NRPE, self).__init__()
186 self.config = config()242 self.config = config()
243 self.primary = primary
187 self.nagios_context = self.config['nagios_context']244 self.nagios_context = self.config['nagios_context']
245 if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
246 self.nagios_servicegroups = self.config['nagios_servicegroups']
247 else:
248 self.nagios_servicegroups = self.nagios_context
188 self.unit_name = local_unit().replace('/', '-')249 self.unit_name = local_unit().replace('/', '-')
189 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)250 if hostname:
251 self.hostname = hostname
252 else:
253 nagios_hostname = get_nagios_hostname()
254 if nagios_hostname:
255 self.hostname = nagios_hostname
256 else:
257 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
190 self.checks = []258 self.checks = []
259 # Iff in an nrpe-external-master relation hook, set primary status
260 relation = relation_ids('nrpe-external-master')
261 if relation:
262 log("Setting charm primary status {}".format(primary))
263 for rid in relation_ids('nrpe-external-master'):
264 relation_set(relation_id=rid, relation_settings={'primary': self.primary})
191265
192 def add_check(self, *args, **kwargs):266 def add_check(self, *args, **kwargs):
193 self.checks.append(Check(*args, **kwargs))267 self.checks.append(Check(*args, **kwargs))
194268
269 def remove_check(self, *args, **kwargs):
270 if kwargs.get('shortname') is None:
271 raise ValueError('shortname of check must be specified')
272
273 # Use sensible defaults if they're not specified - these are not
274 # actually used during removal, but they're required for constructing
275 # the Check object; check_disk is chosen because it's part of the
276 # nagios-plugins-basic package.
277 if kwargs.get('check_cmd') is None:
278 kwargs['check_cmd'] = 'check_disk'
279 if kwargs.get('description') is None:
280 kwargs['description'] = ''
281
282 check = Check(*args, **kwargs)
283 check.remove(self.hostname)
284
195 def write(self):285 def write(self):
196 try:286 try:
197 nagios_uid = pwd.getpwnam('nagios').pw_uid287 nagios_uid = pwd.getpwnam('nagios').pw_uid
198 nagios_gid = grp.getgrnam('nagios').gr_gid288 nagios_gid = grp.getgrnam('nagios').gr_gid
199 except:289 except Exception:
200 log("Nagios user not set up, nrpe checks not updated")290 log("Nagios user not set up, nrpe checks not updated")
201 return291 return
202292
@@ -207,12 +297,154 @@ class NRPE(object):
207 nrpe_monitors = {}297 nrpe_monitors = {}
208 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}298 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
209 for nrpecheck in self.checks:299 for nrpecheck in self.checks:
210 nrpecheck.write(self.nagios_context, self.hostname)300 nrpecheck.write(self.nagios_context, self.hostname,
301 self.nagios_servicegroups)
211 nrpe_monitors[nrpecheck.shortname] = {302 nrpe_monitors[nrpecheck.shortname] = {
212 "command": nrpecheck.command,303 "command": nrpecheck.command,
213 }304 }
214305
215 service('restart', 'nagios-nrpe-server')306 # update-status hooks are configured to firing every 5 minutes by
307 # default. When nagios-nrpe-server is restarted, the nagios server
308 # reports checks failing causing unneccessary alerts. Let's not restart
309 # on update-status hooks.
310 if not hook_name() == 'update-status':
311 service('restart', 'nagios-nrpe-server')
216312
217 for rid in relation_ids("local-monitors"):313 monitor_ids = relation_ids("local-monitors") + \
314 relation_ids("nrpe-external-master")
315 for rid in monitor_ids:
218 relation_set(relation_id=rid, monitors=yaml.dump(monitors))316 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
317
318
319def get_nagios_hostcontext(relation_name='nrpe-external-master'):
320 """
321 Query relation with nrpe subordinate, return the nagios_host_context
322
323 :param str relation_name: Name of relation nrpe sub joined to
324 """
325 for rel in relations_of_type(relation_name):
326 if 'nagios_host_context' in rel:
327 return rel['nagios_host_context']
328
329
330def get_nagios_hostname(relation_name='nrpe-external-master'):
331 """
332 Query relation with nrpe subordinate, return the nagios_hostname
333
334 :param str relation_name: Name of relation nrpe sub joined to
335 """
336 for rel in relations_of_type(relation_name):
337 if 'nagios_hostname' in rel:
338 return rel['nagios_hostname']
339
340
341def get_nagios_unit_name(relation_name='nrpe-external-master'):
342 """
343 Return the nagios unit name prepended with host_context if needed
344
345 :param str relation_name: Name of relation nrpe sub joined to
346 """
347 host_context = get_nagios_hostcontext(relation_name)
348 if host_context:
349 unit = "%s:%s" % (host_context, local_unit())
350 else:
351 unit = local_unit()
352 return unit
353
354
355def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
356 """
357 Add checks for each service in list
358
359 :param NRPE nrpe: NRPE object to add check to
360 :param list services: List of services to check
361 :param str unit_name: Unit name to use in check description
362 :param bool immediate_check: For sysv init, run the service check immediately
363 """
364 for svc in services:
365 # Don't add a check for these services from neutron-gateway
366 if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
367 next
368
369 upstart_init = '/etc/init/%s.conf' % svc
370 sysv_init = '/etc/init.d/%s' % svc
371
372 if host.init_is_systemd():
373 nrpe.add_check(
374 shortname=svc,
375 description='process check {%s}' % unit_name,
376 check_cmd='check_systemd.py %s' % svc
377 )
378 elif os.path.exists(upstart_init):
379 nrpe.add_check(
380 shortname=svc,
381 description='process check {%s}' % unit_name,
382 check_cmd='check_upstart_job %s' % svc
383 )
384 elif os.path.exists(sysv_init):
385 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
386 checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
387 croncmd = (
388 '/usr/local/lib/nagios/plugins/check_exit_status.pl '
389 '-e -s /etc/init.d/%s status' % svc
390 )
391 cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
392 f = open(cronpath, 'w')
393 f.write(cron_file)
394 f.close()
395 nrpe.add_check(
396 shortname=svc,
397 description='service check {%s}' % unit_name,
398 check_cmd='check_status_file.py -f %s' % checkpath,
399 )
400 # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
401 # (LP: #1670223).
402 if immediate_check and os.path.isdir(nrpe.homedir):
403 f = open(checkpath, 'w')
404 subprocess.call(
405 croncmd.split(),
406 stdout=f,
407 stderr=subprocess.STDOUT
408 )
409 f.close()
410 os.chmod(checkpath, 0o644)
411
412
413def copy_nrpe_checks(nrpe_files_dir=None):
414 """
415 Copy the nrpe checks into place
416
417 """
418 NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
419 default_nrpe_files_dir = os.path.join(
420 os.getenv('CHARM_DIR'),
421 'hooks',
422 'charmhelpers',
423 'contrib',
424 'openstack',
425 'files')
426 if not nrpe_files_dir:
427 nrpe_files_dir = default_nrpe_files_dir
428 if not os.path.exists(NAGIOS_PLUGINS):
429 os.makedirs(NAGIOS_PLUGINS)
430 for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
431 if os.path.isfile(fname):
432 shutil.copy2(fname,
433 os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
434
435
436def add_haproxy_checks(nrpe, unit_name):
437 """
438 Add checks for each service in list
439
440 :param NRPE nrpe: NRPE object to add check to
441 :param str unit_name: Unit name to use in check description
442 """
443 nrpe.add_check(
444 shortname='haproxy_servers',
445 description='Check HAProxy {%s}' % unit_name,
446 check_cmd='check_haproxy.sh')
447 nrpe.add_check(
448 shortname='haproxy_queue',
449 description='Check HAProxy queue depth {%s}' % unit_name,
450 check_cmd='check_haproxy_queue_depth.sh')
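
The reworked nrpe helper above is what the charm's nrpe-external-master and local-monitors hooks drive. A minimal sketch of a hook using the new API; the hostname lookup, add_check and write calls come from the module above, while the specific check command shown is only illustrative:

from charmhelpers.contrib.charmsupport import nrpe


def update_nrpe_config():
    # hostname comes from the nrpe-external-master relation when available
    hostname = nrpe.get_nagios_hostname()
    nrpe_compat = nrpe.NRPE(hostname=hostname)  # pass primary=False for subordinates
    nrpe_compat.add_check(
        shortname='squid',
        description='process check {%s}' % nrpe.get_nagios_unit_name(),
        check_cmd='check_http -I 127.0.0.1 -p 3128',  # illustrative command only
    )
    # writes /etc/nagios/nrpe.d/*.cfg plus the nagios export files and
    # publishes the monitors dict on the monitoring relations
    nrpe_compat.write()
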
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
index 0f905df..7ea43f0 100644
--- a/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py
@@ -1,8 +1,23 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
1'''15'''
2Functions for managing volumes in juju units. One volume is supported per unit.16Functions for managing volumes in juju units. One volume is supported per unit.
3Subordinates may have their own storage, provided it is on its own partition.17Subordinates may have their own storage, provided it is on its own partition.
418
5Configuration stanzas:19Configuration stanzas::
20
6 volume-ephemeral:21 volume-ephemeral:
7 type: boolean22 type: boolean
8 default: true23 default: true
@@ -20,7 +35,8 @@ Configuration stanzas:
20 is 'true' and no volume-map value is set. Use 'juju set' to set a35 is 'true' and no volume-map value is set. Use 'juju set' to set a
21 value and 'juju resolved' to complete configuration.36 value and 'juju resolved' to complete configuration.
2237
23Usage:38Usage::
39
24 from charmsupport.volumes import configure_volume, VolumeConfigurationError40 from charmsupport.volumes import configure_volume, VolumeConfigurationError
25 from charmsupport.hookenv import log, ERROR41 from charmsupport.hookenv import log, ERROR
26 def post_mount_hook():42 def post_mount_hook():
@@ -34,6 +50,7 @@ Usage:
34 after_change=post_mount_hook)50 after_change=post_mount_hook)
35 except VolumeConfigurationError:51 except VolumeConfigurationError:
36 log('Storage could not be configured', ERROR)52 log('Storage could not be configured', ERROR)
53
37'''54'''
3855
39# XXX: Known limitations56# XXX: Known limitations
diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py
index e69de29..d7567b8 100644
--- a/hooks/charmhelpers/core/__init__.py
+++ b/hooks/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py
new file mode 100644
index 0000000..6ad41ee
--- /dev/null
+++ b/hooks/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15#
16# Copyright 2014 Canonical Ltd.
17#
18# Authors:
19# Edward Hope-Morley <opentastic@gmail.com>
20#
21
22import time
23
24from charmhelpers.core.hookenv import (
25 log,
26 INFO,
27)
28
29
30def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
31 """If the decorated function raises exception exc_type, allow num_retries
32 retry attempts before raise the exception.
33 """
34 def _retry_on_exception_inner_1(f):
35 def _retry_on_exception_inner_2(*args, **kwargs):
36 retries = num_retries
37 multiplier = 1
38 while True:
39 try:
40 return f(*args, **kwargs)
41 except exc_type:
42 if not retries:
43 raise
44
45 delay = base_delay * multiplier
46 multiplier += 1
47 log("Retrying '%s' %d more times (delay=%s)" %
48 (f.__name__, retries, delay), level=INFO)
49 retries -= 1
50 if delay:
51 time.sleep(delay)
52
53 return _retry_on_exception_inner_2
54
55 return _retry_on_exception_inner_1
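
retry_on_exception, newly vendored above, retries a call with a linearly growing delay. A small sketch, assuming a squid service reload as the retried operation (the service name and command are illustrative, not taken from this charm's hooks):

import subprocess

from charmhelpers.core.decorators import retry_on_exception


@retry_on_exception(3, base_delay=5, exc_type=subprocess.CalledProcessError)
def reload_squid():
    # retried up to 3 times, sleeping 5s, 10s, then 15s between attempts
    subprocess.check_call(['service', 'squid', 'reload'])
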
diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py
new file mode 100644
index 0000000..fdd82b7
--- /dev/null
+++ b/hooks/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17
18__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
19
20import os
21import subprocess
22
23
24def sed(filename, before, after, flags='g'):
25 """
26 Search and replaces the given pattern on filename.
27
28 :param filename: relative or absolute file path.
29 :param before: expression to be replaced (see 'man sed')
30 :param after: expression to replace with (see 'man sed')
31 :param flags: sed-compatible regex flags in example, to make
32 the search and replace case insensitive, specify ``flags="i"``.
33 The ``g`` flag is always specified regardless, so you do not
34 need to remember to include it when overriding this parameter.
35 :returns: If the sed command exit code was zero then return,
36 otherwise raise CalledProcessError.
37 """
38 expression = r's/{0}/{1}/{2}'.format(before,
39 after, flags)
40
41 return subprocess.check_call(["sed", "-i", "-r", "-e",
42 expression,
43 os.path.expanduser(filename)])
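
The new files.sed helper simply shells out to 'sed -i -r -e s/before/after/flags'. A one-line sketch, with an assumed squid.conf path used purely for illustration:

from charmhelpers.core.files import sed

# rewrite the http_port directive in place; the path is an assumption
sed('/etc/squid/squid.conf', r'^http_port .*', 'http_port 3128')
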
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..d9fa915
--- /dev/null
+++ b/hooks/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17
18import io
19import os
20
21__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
22
23
24class Fstab(io.FileIO):
25 """This class extends file in order to implement a file reader/writer
26 for file `/etc/fstab`
27 """
28
29 class Entry(object):
30 """Entry class represents a non-comment line on the `/etc/fstab` file
31 """
32 def __init__(self, device, mountpoint, filesystem,
33 options, d=0, p=0):
34 self.device = device
35 self.mountpoint = mountpoint
36 self.filesystem = filesystem
37
38 if not options:
39 options = "defaults"
40
41 self.options = options
42 self.d = int(d)
43 self.p = int(p)
44
45 def __eq__(self, o):
46 return str(self) == str(o)
47
48 def __str__(self):
49 return "{} {} {} {} {} {}".format(self.device,
50 self.mountpoint,
51 self.filesystem,
52 self.options,
53 self.d,
54 self.p)
55
56 DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
57
58 def __init__(self, path=None):
59 if path:
60 self._path = path
61 else:
62 self._path = self.DEFAULT_PATH
63 super(Fstab, self).__init__(self._path, 'rb+')
64
65 def _hydrate_entry(self, line):
66 # NOTE: use split with no arguments to split on any
67 # whitespace including tabs
68 return Fstab.Entry(*filter(
69 lambda x: x not in ('', None),
70 line.strip("\n").split()))
71
72 @property
73 def entries(self):
74 self.seek(0)
75 for line in self.readlines():
76 line = line.decode('us-ascii')
77 try:
78 if line.strip() and not line.strip().startswith("#"):
79 yield self._hydrate_entry(line)
80 except ValueError:
81 pass
82
83 def get_entry_by_attr(self, attr, value):
84 for entry in self.entries:
85 e_attr = getattr(entry, attr)
86 if e_attr == value:
87 return entry
88 return None
89
90 def add_entry(self, entry):
91 if self.get_entry_by_attr('device', entry.device):
92 return False
93
94 self.write((str(entry) + '\n').encode('us-ascii'))
95 self.truncate()
96 return entry
97
98 def remove_entry(self, entry):
99 self.seek(0)
100
101 lines = [l.decode('us-ascii') for l in self.readlines()]
102
103 found = False
104 for index, line in enumerate(lines):
105 if line.strip() and not line.strip().startswith("#"):
106 if self._hydrate_entry(line) == entry:
107 found = True
108 break
109
110 if not found:
111 return False
112
113 lines.remove(line)
114
115 self.seek(0)
116 self.write(''.join(lines).encode('us-ascii'))
117 self.truncate()
118 return True
119
120 @classmethod
121 def remove_by_mountpoint(cls, mountpoint, path=None):
122 fstab = cls(path=path)
123 entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
124 if entry:
125 return fstab.remove_entry(entry)
126 return False
127
128 @classmethod
129 def add(cls, device, mountpoint, filesystem, options=None, path=None):
130 return cls(path=path).add_entry(Fstab.Entry(device,
131 mountpoint, filesystem,
132 options=options))
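
The Fstab helper above exposes classmethods for adding and removing /etc/fstab entries, which charmsupport.volumes relies on. A short sketch with made-up device and mountpoint values (both calls modify /etc/fstab, so they need root):

from charmhelpers.core.fstab import Fstab

# add an entry unless one already exists for the device
Fstab.add('/dev/vdb', '/var/spool/squid', 'ext4', options='defaults,noatime')

# later, drop the entry again by its mountpoint
Fstab.remove_by_mountpoint('/var/spool/squid')
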
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 2b06706..fc57505 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -1,29 +1,61 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
1"Interactions with the Juju environment"15"Interactions with the Juju environment"
2# Copyright 2013 Canonical Ltd.16# Copyright 2013 Canonical Ltd.
3#17#
4# Authors:18# Authors:
5# Charm Helpers Developers <juju@lists.ubuntu.com>19# Charm Helpers Developers <juju@lists.ubuntu.com>
620
21from __future__ import print_function
22import copy
23from distutils.version import LooseVersion
24from functools import wraps
25from collections import namedtuple
26import glob
7import os27import os
8import json28import json
9import yaml29import yaml
30import re
10import subprocess31import subprocess
11import UserDict32import sys
33import errno
34import tempfile
35from subprocess import CalledProcessError
36
37import six
38if not six.PY3:
39 from UserDict import UserDict
40else:
41 from collections import UserDict
42
1243
13CRITICAL = "CRITICAL"44CRITICAL = "CRITICAL"
14ERROR = "ERROR"45ERROR = "ERROR"
15WARNING = "WARNING"46WARNING = "WARNING"
16INFO = "INFO"47INFO = "INFO"
17DEBUG = "DEBUG"48DEBUG = "DEBUG"
49TRACE = "TRACE"
18MARKER = object()50MARKER = object()
1951
20cache = {}52cache = {}
2153
2254
23def cached(func):55def cached(func):
24 ''' Cache return values for multiple executions of func + args56 """Cache return values for multiple executions of func + args
2557
26 For example:58 For example::
2759
28 @cached60 @cached
29 def unit_get(attribute):61 def unit_get(attribute):
@@ -32,22 +64,25 @@ def cached(func):
32 unit_get('test')64 unit_get('test')
3365
34 will cache the result of unit_get + 'test' for future calls.66 will cache the result of unit_get + 'test' for future calls.
35 '''67 """
68 @wraps(func)
36 def wrapper(*args, **kwargs):69 def wrapper(*args, **kwargs):
37 global cache70 global cache
38 key = str((func, args, kwargs))71 key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
39 try:72 try:
40 return cache[key]73 return cache[key]
41 except KeyError:74 except KeyError:
42 res = func(*args, **kwargs)75 pass # Drop out of the exception handler scope.
43 cache[key] = res76 res = func(*args, **kwargs)
44 return res77 cache[key] = res
78 return res
79 wrapper._wrapped = func
45 return wrapper80 return wrapper
4681
4782
48def flush(key):83def flush(key):
49 ''' Flushes any entries from function cache where the84 """Flushes any entries from function cache where the
50 key is found in the function+args '''85 key is found in the function+args """
51 flush_list = []86 flush_list = []
52 for item in cache:87 for item in cache:
53 if key in item:88 if key in item:
@@ -57,20 +92,33 @@ def flush(key):
5792
5893
59def log(message, level=None):94def log(message, level=None):
60 "Write a message to the juju log"95 """Write a message to the juju log"""
61 command = ['juju-log']96 command = ['juju-log']
62 if level:97 if level:
63 command += ['-l', level]98 command += ['-l', level]
99 if not isinstance(message, six.string_types):
100 message = repr(message)
64 command += [message]101 command += [message]
65 subprocess.call(command)102 # Missing juju-log should not cause failures in unit tests
103 # Send log output to stderr
104 try:
105 subprocess.call(command)
106 except OSError as e:
107 if e.errno == errno.ENOENT:
108 if level:
109 message = "{}: {}".format(level, message)
110 message = "juju-log: {}".format(message)
111 print(message, file=sys.stderr)
112 else:
113 raise
66114
67115
68class Serializable(UserDict.IterableUserDict):116class Serializable(UserDict):
69 "Wrapper, an object that can be serialized to yaml or json"117 """Wrapper, an object that can be serialized to yaml or json"""
70118
71 def __init__(self, obj):119 def __init__(self, obj):
72 # wrap the object120 # wrap the object
73 UserDict.IterableUserDict.__init__(self)121 UserDict.__init__(self)
74 self.data = obj122 self.data = obj
75123
76 def __getattr__(self, attr):124 def __getattr__(self, attr):
@@ -96,11 +144,11 @@ class Serializable(UserDict.IterableUserDict):
96 self.data = state144 self.data = state
97145
98 def json(self):146 def json(self):
99 "Serialize the object to json"147 """Serialize the object to json"""
100 return json.dumps(self.data)148 return json.dumps(self.data)
101149
102 def yaml(self):150 def yaml(self):
103 "Serialize the object to yaml"151 """Serialize the object to yaml"""
104 return yaml.dump(self.data)152 return yaml.dump(self.data)
105153
106154
@@ -119,50 +167,261 @@ def execution_environment():
119167
120168
121def in_relation_hook():169def in_relation_hook():
122 "Determine whether we're running in a relation hook"170 """Determine whether we're running in a relation hook"""
123 return 'JUJU_RELATION' in os.environ171 return 'JUJU_RELATION' in os.environ
124172
125173
126def relation_type():174def relation_type():
127 "The scope for the current relation hook"175 """The scope for the current relation hook"""
128 return os.environ.get('JUJU_RELATION', None)176 return os.environ.get('JUJU_RELATION', None)
129177
130178
131def relation_id():179@cached
132 "The relation ID for the current relation hook"180def relation_id(relation_name=None, service_or_unit=None):
133 return os.environ.get('JUJU_RELATION_ID', None)181 """The relation ID for the current or a specified relation"""
182 if not relation_name and not service_or_unit:
183 return os.environ.get('JUJU_RELATION_ID', None)
184 elif relation_name and service_or_unit:
185 service_name = service_or_unit.split('/')[0]
186 for relid in relation_ids(relation_name):
187 remote_service = remote_service_name(relid)
188 if remote_service == service_name:
189 return relid
190 else:
191 raise ValueError('Must specify neither or both of relation_name and service_or_unit')
134192
135193
136def local_unit():194def local_unit():
137 "Local unit ID"195 """Local unit ID"""
138 return os.environ['JUJU_UNIT_NAME']196 return os.environ['JUJU_UNIT_NAME']
139197
140198
141def remote_unit():199def remote_unit():
142 "The remote unit for the current relation hook"200 """The remote unit for the current relation hook"""
143 return os.environ['JUJU_REMOTE_UNIT']201 return os.environ.get('JUJU_REMOTE_UNIT', None)
144202
145203
146def service_name():204def application_name():
147 "The name service group this unit belongs to"205 """
206 The name of the deployed application this unit belongs to.
207 """
148 return local_unit().split('/')[0]208 return local_unit().split('/')[0]
149209
150210
211def service_name():
212 """
213 .. deprecated:: 0.19.1
214 Alias for :func:`application_name`.
215 """
216 return application_name()
217
218
219def model_name():
220 """
221 Name of the model that this unit is deployed in.
222 """
223 return os.environ['JUJU_MODEL_NAME']
224
225
226def model_uuid():
227 """
228 UUID of the model that this unit is deployed in.
229 """
230 return os.environ['JUJU_MODEL_UUID']
231
232
233def principal_unit():
234 """Returns the principal unit of this unit, otherwise None"""
235 # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
236 principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
237 # If it's empty, then this unit is the principal
238 if principal_unit == '':
239 return os.environ['JUJU_UNIT_NAME']
240 elif principal_unit is not None:
241 return principal_unit
242 # For Juju 2.1 and below, let's try work out the principle unit by
243 # the various charms' metadata.yaml.
244 for reltype in relation_types():
245 for rid in relation_ids(reltype):
246 for unit in related_units(rid):
247 md = _metadata_unit(unit)
248 if not md:
249 continue
250 subordinate = md.pop('subordinate', None)
251 if not subordinate:
252 return unit
253 return None
254
255
151@cached256@cached
257def remote_service_name(relid=None):
258 """The remote service name for a given relation-id (or the current relation)"""
259 if relid is None:
260 unit = remote_unit()
261 else:
262 units = related_units(relid)
263 unit = units[0] if units else None
264 return unit.split('/')[0] if unit else None
265
266
267def hook_name():
268 """The name of the currently executing hook"""
269 return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
270
271
272class Config(dict):
273 """A dictionary representation of the charm's config.yaml, with some
274 extra features:
275
276 - See which values in the dictionary have changed since the previous hook.
277 - For values that have changed, see what the previous value was.
278 - Store arbitrary data for use in a later hook.
279
280 NOTE: Do not instantiate this object directly - instead call
281 ``hookenv.config()``, which will return an instance of :class:`Config`.
282
283 Example usage::
284
285 >>> # inside a hook
286 >>> from charmhelpers.core import hookenv
287 >>> config = hookenv.config()
288 >>> config['foo']
289 'bar'
290 >>> # store a new key/value for later use
291 >>> config['mykey'] = 'myval'
292
293
294 >>> # user runs `juju set mycharm foo=baz`
295 >>> # now we're inside subsequent config-changed hook
296 >>> config = hookenv.config()
297 >>> config['foo']
298 'baz'
299 >>> # test to see if this val has changed since last hook
300 >>> config.changed('foo')
301 True
302 >>> # what was the previous value?
303 >>> config.previous('foo')
304 'bar'
305 >>> # keys/values that we add are preserved across hooks
306 >>> config['mykey']
307 'myval'
308
309 """
310 CONFIG_FILE_NAME = '.juju-persistent-config'
311
312 def __init__(self, *args, **kw):
313 super(Config, self).__init__(*args, **kw)
314 self.implicit_save = True
315 self._prev_dict = None
316 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
317 if os.path.exists(self.path) and os.stat(self.path).st_size:
318 self.load_previous()
319 atexit(self._implicit_save)
320
321 def load_previous(self, path=None):
322 """Load previous copy of config from disk.
323
324 In normal usage you don't need to call this method directly - it
325 is called automatically at object initialization.
326
327 :param path:
328
329 File path from which to load the previous config. If `None`,
330 config is loaded from the default location. If `path` is
331 specified, subsequent `save()` calls will write to the same
332 path.
333
334 """
335 self.path = path or self.path
336 with open(self.path) as f:
337 try:
338 self._prev_dict = json.load(f)
339 except ValueError as e:
340 log('Unable to parse previous config data - {}'.format(str(e)),
341 level=ERROR)
342 for k, v in copy.deepcopy(self._prev_dict).items():
343 if k not in self:
344 self[k] = v
345
346 def changed(self, key):
347 """Return True if the current value for this key is different from
348 the previous value.
349
350 """
351 if self._prev_dict is None:
352 return True
353 return self.previous(key) != self.get(key)
354
355 def previous(self, key):
356 """Return previous value for this key, or None if there
357 is no previous value.
358
359 """
360 if self._prev_dict:
361 return self._prev_dict.get(key)
362 return None
363
364 def save(self):
365 """Save this config to disk.
366
367 If the charm is using the :mod:`Services Framework <services.base>`
368 or :meth:'@hook <Hooks.hook>' decorator, this
369 is called automatically at the end of successful hook execution.
370 Otherwise, it should be called directly by user code.
371
372 To disable automatic saves, set ``implicit_save=False`` on this
373 instance.
374
375 """
376 with open(self.path, 'w') as f:
377 os.fchmod(f.fileno(), 0o600)
378 json.dump(self, f)
379
380 def _implicit_save(self):
381 if self.implicit_save:
382 self.save()
383
384
385_cache_config = None
386
387
152def config(scope=None):388def config(scope=None):
153 "Juju charm configuration"389 """
154 config_cmd_line = ['config-get']390 Get the juju charm configuration (scope==None) or individual key,
155 if scope is not None:391 (scope=str). The returned value is a Python data structure loaded as
156 config_cmd_line.append(scope)392 JSON from the Juju config command.
157 config_cmd_line.append('--format=json')393
394 :param scope: If set, return the value for the specified key.
395 :type scope: Optional[str]
396 :returns: Either the whole config as a Config, or a key from it.
397 :rtype: Any
398 """
399 global _cache_config
400 config_cmd_line = ['config-get', '--all', '--format=json']
158 try:401 try:
159 return json.loads(subprocess.check_output(config_cmd_line))402 # JSON Decode Exception for Python3.5+
160 except ValueError:403 exc_json = json.decoder.JSONDecodeError
404 except AttributeError:
405 # JSON Decode Exception for Python2.7 through Python3.4
406 exc_json = ValueError
407 try:
408 if _cache_config is None:
409 config_data = json.loads(
410 subprocess.check_output(config_cmd_line).decode('UTF-8'))
411 _cache_config = Config(config_data)
412 if scope is not None:
413 return _cache_config.get(scope)
414 return _cache_config
415 except (exc_json, UnicodeDecodeError) as e:
416 log('Unable to parse output from config-get: config_cmd_line="{}" '
417 'message="{}"'
418 .format(config_cmd_line, str(e)), level=ERROR)
161 return None419 return None
162420
163421
164@cached422@cached
165def relation_get(attribute=None, unit=None, rid=None):423def relation_get(attribute=None, unit=None, rid=None):
424 """Get relation information"""
166 _args = ['relation-get', '--format=json']425 _args = ['relation-get', '--format=json']
167 if rid:426 if rid:
168 _args.append('-r')427 _args.append('-r')
@@ -171,49 +430,88 @@ def relation_get(attribute=None, unit=None, rid=None):
171 if unit:430 if unit:
172 _args.append(unit)431 _args.append(unit)
173 try:432 try:
174 return json.loads(subprocess.check_output(_args))433 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
175 except ValueError:434 except ValueError:
176 return None435 return None
436 except CalledProcessError as e:
437 if e.returncode == 2:
438 return None
439 raise
177440
178441
179def relation_set(relation_id=None, relation_settings={}, **kwargs):442def relation_set(relation_id=None, relation_settings=None, **kwargs):
443 """Set relation information for the current unit"""
444 relation_settings = relation_settings if relation_settings else {}
180 relation_cmd_line = ['relation-set']445 relation_cmd_line = ['relation-set']
446 accepts_file = "--file" in subprocess.check_output(
447 relation_cmd_line + ["--help"], universal_newlines=True)
181 if relation_id is not None:448 if relation_id is not None:
182 relation_cmd_line.extend(('-r', relation_id))449 relation_cmd_line.extend(('-r', relation_id))
183 for k, v in (relation_settings.items() + kwargs.items()):450 settings = relation_settings.copy()
184 if v is None:451 settings.update(kwargs)
185 relation_cmd_line.append('{}='.format(k))452 for key, value in settings.items():
186 else:453 # Force value to be a string: it always should, but some call
187 relation_cmd_line.append('{}={}'.format(k, v))454 # sites pass in things like dicts or numbers.
188 subprocess.check_call(relation_cmd_line)455 if value is not None:
456 settings[key] = "{}".format(value)
457 if accepts_file:
458 # --file was introduced in Juju 1.23.2. Use it by default if
459 # available, since otherwise we'll break if the relation data is
460 # too big. Ideally we should tell relation-set to read the data from
461 # stdin, but that feature is broken in 1.23.2: Bug #1454678.
462 with tempfile.NamedTemporaryFile(delete=False) as settings_file:
463 settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
464 subprocess.check_call(
465 relation_cmd_line + ["--file", settings_file.name])
466 os.remove(settings_file.name)
467 else:
468 for key, value in settings.items():
469 if value is None:
470 relation_cmd_line.append('{}='.format(key))
471 else:
472 relation_cmd_line.append('{}={}'.format(key, value))
473 subprocess.check_call(relation_cmd_line)
189 # Flush cache of any relation-gets for local unit474 # Flush cache of any relation-gets for local unit
190 flush(local_unit())475 flush(local_unit())
191476
192477
478def relation_clear(r_id=None):
479 ''' Clears any relation data already set on relation r_id '''
480 settings = relation_get(rid=r_id,
481 unit=local_unit())
482 for setting in settings:
483 if setting not in ['public-address', 'private-address']:
484 settings[setting] = None
485 relation_set(relation_id=r_id,
486 **settings)
487
488
193@cached489@cached
194def relation_ids(reltype=None):490def relation_ids(reltype=None):
195 "A list of relation_ids"491 """A list of relation_ids"""
196 reltype = reltype or relation_type()492 reltype = reltype or relation_type()
197 relid_cmd_line = ['relation-ids', '--format=json']493 relid_cmd_line = ['relation-ids', '--format=json']
198 if reltype is not None:494 if reltype is not None:
199 relid_cmd_line.append(reltype)495 relid_cmd_line.append(reltype)
200 return json.loads(subprocess.check_output(relid_cmd_line)) or []496 return json.loads(
497 subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
201 return []498 return []
202499
203500
204@cached501@cached
205def related_units(relid=None):502def related_units(relid=None):
206 "A list of related units"503 """A list of related units"""
207 relid = relid or relation_id()504 relid = relid or relation_id()
208 units_cmd_line = ['relation-list', '--format=json']505 units_cmd_line = ['relation-list', '--format=json']
209 if relid is not None:506 if relid is not None:
210 units_cmd_line.extend(('-r', relid))507 units_cmd_line.extend(('-r', relid))
211 return json.loads(subprocess.check_output(units_cmd_line)) or []508 return json.loads(
509 subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
212510
213511
214@cached512@cached
215def relation_for_unit(unit=None, rid=None):513def relation_for_unit(unit=None, rid=None):
216 "Get the json represenation of a unit's relation"514 """Get the json represenation of a unit's relation"""
217 unit = unit or remote_unit()515 unit = unit or remote_unit()
218 relation = relation_get(unit=unit, rid=rid)516 relation = relation_get(unit=unit, rid=rid)
219 for key in relation:517 for key in relation:
@@ -225,7 +523,7 @@ def relation_for_unit(unit=None, rid=None):
225523
226@cached524@cached
227def relations_for_id(relid=None):525def relations_for_id(relid=None):
228 "Get relations of a specific relation ID"526 """Get relations of a specific relation ID"""
229 relation_data = []527 relation_data = []
230 relid = relid or relation_ids()528 relid = relid or relation_ids()
231 for unit in related_units(relid):529 for unit in related_units(relid):
@@ -237,7 +535,7 @@ def relations_for_id(relid=None):
237535
238@cached536@cached
239def relations_of_type(reltype=None):537def relations_of_type(reltype=None):
240 "Get relations of a specific type"538 """Get relations of a specific type"""
241 relation_data = []539 relation_data = []
242 reltype = reltype or relation_type()540 reltype = reltype or relation_type()
243 for relid in relation_ids(reltype):541 for relid in relation_ids(reltype):
@@ -248,22 +546,121 @@ def relations_of_type(reltype=None):
248546
249547
250@cached548@cached
549def metadata():
550 """Get the current charm metadata.yaml contents as a python object"""
551 with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
552 return yaml.safe_load(md)
553
554
555def _metadata_unit(unit):
556 """Given the name of a unit (e.g. apache2/0), get the unit charm's
557 metadata.yaml. Very similar to metadata() but allows us to inspect
558 other units. Unit needs to be co-located, such as a subordinate or
559 principal/primary.
560
561 :returns: metadata.yaml as a python object.
562
563 """
564 basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
565 unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
566 joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
567 if not os.path.exists(joineddir):
568 return None
569 with open(joineddir) as md:
570 return yaml.safe_load(md)
571
572
573@cached
251def relation_types():574def relation_types():
252 "Get a list of relation types supported by this charm"575 """Get a list of relation types supported by this charm"""
253 charmdir = os.environ.get('CHARM_DIR', '')
254 mdf = open(os.path.join(charmdir, 'metadata.yaml'))
255 md = yaml.safe_load(mdf)
256 rel_types = []576 rel_types = []
577 md = metadata()
257 for key in ('provides', 'requires', 'peers'):578 for key in ('provides', 'requires', 'peers'):
258 section = md.get(key)579 section = md.get(key)
259 if section:580 if section:
260 rel_types.extend(section.keys())581 rel_types.extend(section.keys())
261 mdf.close()
262 return rel_types582 return rel_types
263583
264584
265@cached585@cached
586def peer_relation_id():
587 '''Get the peers relation id if a peers relation has been joined, else None.'''
588 md = metadata()
589 section = md.get('peers')
590 if section:
591 for key in section:
592 relids = relation_ids(key)
593 if relids:
594 return relids[0]
595 return None
596
597
598@cached
599def relation_to_interface(relation_name):
600 """
601 Given the name of a relation, return the interface that relation uses.
602
603 :returns: The interface name, or ``None``.
604 """
605 return relation_to_role_and_interface(relation_name)[1]
606
607
608@cached
609def relation_to_role_and_interface(relation_name):
610 """
611 Given the name of a relation, return the role and the name of the interface
612 that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
613
614 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
615 """
616 _metadata = metadata()
617 for role in ('provides', 'requires', 'peers'):
618 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
619 if interface:
620 return role, interface
621 return None, None
622
623
624@cached
625def role_and_interface_to_relations(role, interface_name):
626 """
627 Given a role and interface name, return a list of relation names for the
628 current charm that use that interface under that role (where role is one
629 of ``provides``, ``requires``, or ``peers``).
630
631 :returns: A list of relation names.
632 """
633 _metadata = metadata()
634 results = []
635 for relation_name, relation in _metadata.get(role, {}).items():
636 if relation['interface'] == interface_name:
637 results.append(relation_name)
638 return results
639
640
641@cached
642def interface_to_relations(interface_name):
643 """
644 Given an interface, return a list of relation names for the current
645 charm that use that interface.
646
647 :returns: A list of relation names.
648 """
649 results = []
650 for role in ('provides', 'requires', 'peers'):
651 results.extend(role_and_interface_to_relations(role, interface_name))
652 return results
653
654
655@cached
656def charm_name():
657 """Get the name of the current charm as is specified on metadata.yaml"""
658 return metadata().get('name')
659
660
661@cached
266def relations():662def relations():
663 """Get a nested dictionary of relation data for all related units"""
267 rels = {}664 rels = {}
268 for reltype in relation_types():665 for reltype in relation_types():
269 relids = {}666 relids = {}
@@ -277,53 +674,187 @@ def relations():
277 return rels674 return rels
278675
279676
677@cached
678def is_relation_made(relation, keys='private-address'):
679 '''
680 Determine whether a relation is established by checking for
681 presence of key(s). If a list of keys is provided, they
682 must all be present for the relation to be identified as made
683 '''
684 if isinstance(keys, str):
685 keys = [keys]
686 for r_id in relation_ids(relation):
687 for unit in related_units(r_id):
688 context = {}
689 for k in keys:
690 context[k] = relation_get(k, rid=r_id,
691 unit=unit)
692 if None not in context.values():
693 return True
694 return False
695
696
697def _port_op(op_name, port, protocol="TCP"):
698 """Open or close a service network port"""
699 _args = [op_name]
700 icmp = protocol.upper() == "ICMP"
701 if icmp:
702 _args.append(protocol)
703 else:
704 _args.append('{}/{}'.format(port, protocol))
705 try:
706 subprocess.check_call(_args)
707 except subprocess.CalledProcessError:
708 # Older Juju pre 2.3 doesn't support ICMP
709 # so treat it as a no-op if it fails.
710 if not icmp:
711 raise
712
713
280def open_port(port, protocol="TCP"):714def open_port(port, protocol="TCP"):
281 "Open a service network port"715 """Open a service network port"""
716 _port_op('open-port', port, protocol)
717
718
719def close_port(port, protocol="TCP"):
720 """Close a service network port"""
721 _port_op('close-port', port, protocol)
722
723
724def open_ports(start, end, protocol="TCP"):
725 """Opens a range of service network ports"""
282 _args = ['open-port']726 _args = ['open-port']
283 _args.append('{}/{}'.format(port, protocol))727 _args.append('{}-{}/{}'.format(start, end, protocol))
284 subprocess.check_call(_args)728 subprocess.check_call(_args)
285729
286730
287def close_port(port, protocol="TCP"):731def close_ports(start, end, protocol="TCP"):
288 "Close a service network port"732 """Close a range of service network ports"""
289 _args = ['close-port']733 _args = ['close-port']
290 _args.append('{}/{}'.format(port, protocol))734 _args.append('{}-{}/{}'.format(start, end, protocol))
291 subprocess.check_call(_args)735 subprocess.check_call(_args)
292736
293737
738def opened_ports():
739 """Get the opened ports
740
741 *Note that this will only show ports opened in a previous hook*
742
743 :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
744 """
745 _args = ['opened-ports', '--format=json']
746 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
747
748
294@cached749@cached
295def unit_get(attribute):750def unit_get(attribute):
751 """Get the unit ID for the remote unit"""
296 _args = ['unit-get', '--format=json', attribute]752 _args = ['unit-get', '--format=json', attribute]
297 try:753 try:
298 return json.loads(subprocess.check_output(_args))754 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
299 except ValueError:755 except ValueError:
300 return None756 return None
301757
302758
759def unit_public_ip():
760 """Get this unit's public IP address"""
761 return unit_get('public-address')
762
763
303def unit_private_ip():764def unit_private_ip():
765 """Get this unit's private IP address"""
304 return unit_get('private-address')766 return unit_get('private-address')
305767
306768
769@cached
770def storage_get(attribute=None, storage_id=None):
771 """Get storage attributes"""
772 _args = ['storage-get', '--format=json']
773 if storage_id:
774 _args.extend(('-s', storage_id))
775 if attribute:
776 _args.append(attribute)
777 try:
778 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
779 except ValueError:
780 return None
781
782
783@cached
784def storage_list(storage_name=None):
785 """List the storage IDs for the unit"""
786 _args = ['storage-list', '--format=json']
787 if storage_name:
788 _args.append(storage_name)
789 try:
790 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
791 except ValueError:
792 return None
793 except OSError as e:
794 import errno
795 if e.errno == errno.ENOENT:
796 # storage-list does not exist
797 return []
798 raise
799
800
307class UnregisteredHookError(Exception):801class UnregisteredHookError(Exception):
802 """Raised when an undefined hook is called"""
308 pass803 pass
309804
310805
311class Hooks(object):806class Hooks(object):
312 def __init__(self):807 """A convenient handler for hook functions.
808
809 Example::
810
811 hooks = Hooks()
812
813 # register a hook, taking its name from the function name
814 @hooks.hook()
815 def install():
816 pass # your code here
817
818 # register a hook, providing a custom hook name
819 @hooks.hook("config-changed")
820 def config_changed():
821 pass # your code here
822
823 if __name__ == "__main__":
824 # execute a hook based on the name the program is called by
825 hooks.execute(sys.argv)
826 """
827
828 def __init__(self, config_save=None):
313 super(Hooks, self).__init__()829 super(Hooks, self).__init__()
314 self._hooks = {}830 self._hooks = {}
315831
832 # For unknown reasons, we allow the Hooks constructor to override
833 # config().implicit_save.
834 if config_save is not None:
835 config().implicit_save = config_save
836
316 def register(self, name, function):837 def register(self, name, function):
838 """Register a hook"""
317 self._hooks[name] = function839 self._hooks[name] = function
318840
319 def execute(self, args):841 def execute(self, args):
842 """Execute a registered hook based on args[0]"""
843 _run_atstart()
320 hook_name = os.path.basename(args[0])844 hook_name = os.path.basename(args[0])
321 if hook_name in self._hooks:845 if hook_name in self._hooks:
322 self._hooks[hook_name]()846 try:
847 self._hooks[hook_name]()
848 except SystemExit as x:
849 if x.code is None or x.code == 0:
850 _run_atexit()
851 raise
852 _run_atexit()
323 else:853 else:
324 raise UnregisteredHookError(hook_name)854 raise UnregisteredHookError(hook_name)
325855
326 def hook(self, *hook_names):856 def hook(self, *hook_names):
857 """Decorator, registering them as hooks"""
327 def wrapper(decorated):858 def wrapper(decorated):
328 for hook_name in hook_names:859 for hook_name in hook_names:
329 self.register(hook_name, decorated)860 self.register(hook_name, decorated)
@@ -336,5 +867,457 @@ class Hooks(object):
336 return wrapper867 return wrapper
337868
338869
870class NoNetworkBinding(Exception):
871 pass
872
873
339def charm_dir():874def charm_dir():
875 """Return the root directory of the current charm"""
876 d = os.environ.get('JUJU_CHARM_DIR')
877 if d is not None:
878 return d
340 return os.environ.get('CHARM_DIR')879 return os.environ.get('CHARM_DIR')
880
881
882@cached
883def action_get(key=None):
884 """Gets the value of an action parameter, or all key/value param pairs"""
885 cmd = ['action-get']
886 if key is not None:
887 cmd.append(key)
888 cmd.append('--format=json')
889 action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
890 return action_data
891
892
893def action_set(values):
894 """Sets the values to be returned after the action finishes"""
895 cmd = ['action-set']
896 for k, v in list(values.items()):
897 cmd.append('{}={}'.format(k, v))
898 subprocess.check_call(cmd)
899
900
901def action_fail(message):
902 """Sets the action status to failed and sets the error message.
903
904 The results set by action_set are preserved."""
905 subprocess.check_call(['action-fail', message])
906
907
908def action_name():
909 """Get the name of the currently executing action."""
910 return os.environ.get('JUJU_ACTION_NAME')
911
912
913def action_uuid():
914 """Get the UUID of the currently executing action."""
915 return os.environ.get('JUJU_ACTION_UUID')
916
917
918def action_tag():
919 """Get the tag for the currently executing action."""
920 return os.environ.get('JUJU_ACTION_TAG')
921
922
923def status_set(workload_state, message):
924 """Set the workload state with a message
925
926 Use status-set to set the workload state with a message which is visible
927 to the user via juju status. If the status-set command is not found then
928 assume this is juju < 1.23 and juju-log the message instead.
929
930 workload_state -- valid juju workload state.
931 message -- status update message
932 """
933 valid_states = ['maintenance', 'blocked', 'waiting', 'active']
934 if workload_state not in valid_states:
935 raise ValueError(
936 '{!r} is not a valid workload state'.format(workload_state)
937 )
938 cmd = ['status-set', workload_state, message]
939 try:
940 ret = subprocess.call(cmd)
941 if ret == 0:
942 return
943 except OSError as e:
944 if e.errno != errno.ENOENT:
945 raise
946 log_message = 'status-set failed: {} {}'.format(workload_state,
947 message)
948 log(log_message, level='INFO')
949
950
951def status_get():
952 """Retrieve the previously set juju workload state and message
953
954 If the status-get command is not found then assume this is juju < 1.23 and
955 return 'unknown', ""
956
957 """
958 cmd = ['status-get', "--format=json", "--include-data"]
959 try:
960 raw_status = subprocess.check_output(cmd)
961 except OSError as e:
962 if e.errno == errno.ENOENT:
963 return ('unknown', "")
964 else:
965 raise
966 else:
967 status = json.loads(raw_status.decode("UTF-8"))
968 return (status["status"], status["message"])
969
970
971def translate_exc(from_exc, to_exc):
972 def inner_translate_exc1(f):
973 @wraps(f)
974 def inner_translate_exc2(*args, **kwargs):
975 try:
976 return f(*args, **kwargs)
977 except from_exc:
978 raise to_exc
979
980 return inner_translate_exc2
981
982 return inner_translate_exc1
983
984
985def application_version_set(version):
986 """Charm authors may trigger this command from any hook to output what
987 version of the application is running. This could be a package version,
988 for instance postgres version 9.5. It could also be a build number or
989 version control revision identifier, for instance git sha 6fb7ba68. """
990
991 cmd = ['application-version-set']
992 cmd.append(version)
993 try:
994 subprocess.check_call(cmd)
995 except OSError:
996 log("Application Version: {}".format(version))
997
998
999@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1000def goal_state():
1001 """Juju goal state values"""
1002 cmd = ['goal-state', '--format=json']
1003 return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
1004
1005
1006@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1007def is_leader():
1008 """Does the current unit hold the juju leadership
1009
1010 Uses juju to determine whether the current unit is the leader of its peers
1011 """
1012 cmd = ['is-leader', '--format=json']
1013 return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
1014
1015
1016@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1017def leader_get(attribute=None):
1018 """Juju leader get value(s)"""
1019 cmd = ['leader-get', '--format=json'] + [attribute or '-']
1020 return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
1021
1022
1023@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1024def leader_set(settings=None, **kwargs):
1025 """Juju leader set value(s)"""
1026 # Don't log secrets.
1027 # log("Juju leader-set '%s'" % (settings), level=DEBUG)
1028 cmd = ['leader-set']
1029 settings = settings or {}
1030 settings.update(kwargs)
1031 for k, v in settings.items():
1032 if v is None:
1033 cmd.append('{}='.format(k))
1034 else:
1035 cmd.append('{}={}'.format(k, v))
1036 subprocess.check_call(cmd)
1037
1038
1039@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1040def payload_register(ptype, klass, pid):
1041 """ is used while a hook is running to let Juju know that a
1042 payload has been started."""
1043 cmd = ['payload-register']
1044 for x in [ptype, klass, pid]:
1045 cmd.append(x)
1046 subprocess.check_call(cmd)
1047
1048
1049@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1050def payload_unregister(klass, pid):
1051 """ is used while a hook is running to let Juju know
1052 that a payload has been manually stopped. The <class> and <id> provided
1053 must match a payload that has been previously registered with juju using
1054 payload-register."""
1055 cmd = ['payload-unregister']
1056 for x in [klass, pid]:
1057 cmd.append(x)
1058 subprocess.check_call(cmd)
1059
1060
1061@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1062def payload_status_set(klass, pid, status):
1063 """is used to update the current status of a registered payload.
1064 The <class> and <id> provided must match a payload that has been previously
1065 registered with juju using payload-register. The <status> must be one of the
1066 following: starting, started, stopping, stopped"""
1067 cmd = ['payload-status-set']
1068 for x in [klass, pid, status]:
1069 cmd.append(x)
1070 subprocess.check_call(cmd)
1071
1072
1073@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1074def resource_get(name):
1075 """used to fetch the resource path of the given name.
1076
1077 <name> must match a name of defined resource in metadata.yaml
1078
1079 returns either a path or False if resource not available
1080 """
1081 if not name:
1082 return False
1083
1084 cmd = ['resource-get', name]
1085 try:
1086 return subprocess.check_output(cmd).decode('UTF-8')
1087 except subprocess.CalledProcessError:
1088 return False
1089
1090
1091@cached
1092def juju_version():
1093 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
1094 # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
1095 jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
1096 return subprocess.check_output([jujud, 'version'],
1097 universal_newlines=True).strip()
1098
1099
1100def has_juju_version(minimum_version):
1101 """Return True if the Juju version is at least the provided version"""
1102 return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
1103
1104
1105_atexit = []
1106_atstart = []
1107
1108
1109def atstart(callback, *args, **kwargs):
1110 '''Schedule a callback to run before the main hook.
1111
1112 Callbacks are run in the order they were added.
1113
1114 This is useful for modules and classes to perform initialization
1115 and inject behavior. In particular:
1116
1117 - Run common code before all of your hooks, such as logging
1118 the hook name or interesting relation data.
1119 - Defer object or module initialization that requires a hook
1120 context until we know there actually is a hook context,
1121 making testing easier.
1122 - Rather than requiring charm authors to include boilerplate to
1123 invoke your helper's behavior, have it run automatically if
1124 your object is instantiated or module imported.
1125
1126 This is not at all useful after your hook framework has been launched.
1127 '''
1128 global _atstart
1129 _atstart.append((callback, args, kwargs))
1130
1131
1132def atexit(callback, *args, **kwargs):
1133 '''Schedule a callback to run on successful hook completion.
1134
1135 Callbacks are run in the reverse order that they were added.'''
1136 _atexit.append((callback, args, kwargs))
1137
1138
1139def _run_atstart():
1140 '''Hook frameworks must invoke this before running the main hook body.'''
1141 global _atstart
1142 for callback, args, kwargs in _atstart:
1143 callback(*args, **kwargs)
1144 del _atstart[:]
1145
1146
1147def _run_atexit():
1148 '''Hook frameworks must invoke this after the main hook body has
1149 successfully completed. Do not invoke it if the hook fails.'''
1150 global _atexit
1151 for callback, args, kwargs in reversed(_atexit):
1152 callback(*args, **kwargs)
1153 del _atexit[:]
1154
1155
1156@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1157def network_get_primary_address(binding):
1158 '''
1159 Deprecated since Juju 2.3; use network_get()
1160
1161 Retrieve the primary network address for a named binding
1162
1163 :param binding: string. The name of a relation of extra-binding
1164 :return: string. The primary IP address for the named binding
1165 :raise: NotImplementedError if run on Juju < 2.0
1166 '''
1167 cmd = ['network-get', '--primary-address', binding]
1168 try:
1169 response = subprocess.check_output(
1170 cmd,
1171 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1172 except CalledProcessError as e:
1173 if 'no network config found for binding' in e.output.decode('UTF-8'):
1174 raise NoNetworkBinding("No network binding for {}"
1175 .format(binding))
1176 else:
1177 raise
1178 return response
1179
1180
1181def network_get(endpoint, relation_id=None):
1182 """
1183 Retrieve the network details for a relation endpoint
1184
1185 :param endpoint: string. The name of a relation endpoint
1186 :param relation_id: int. The ID of the relation for the current context.
1187 :return: dict. The loaded YAML output of the network-get query.
1188 :raise: NotImplementedError if request not supported by the Juju version.
1189 """
1190 if not has_juju_version('2.2'):
1191 raise NotImplementedError(juju_version()) # earlier versions require --primary-address
1192 if relation_id and not has_juju_version('2.3'):
1193 raise NotImplementedError # 2.3 added the -r option
1194
1195 cmd = ['network-get', endpoint, '--format', 'yaml']
1196 if relation_id:
1197 cmd.append('-r')
1198 cmd.append(relation_id)
1199 response = subprocess.check_output(
1200 cmd,
1201 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1202 return yaml.safe_load(response)
1203
1204
1205def add_metric(*args, **kwargs):
1206 """Add metric values. Values may be expressed with keyword arguments. For
1207 metric names containing dashes, these may be expressed as one or more
1208 'key=value' positional arguments. May only be called from the collect-metrics
1209 hook."""
1210 _args = ['add-metric']
1211 _kvpairs = []
1212 _kvpairs.extend(args)
1213 _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
1214 _args.extend(sorted(_kvpairs))
1215 try:
1216 subprocess.check_call(_args)
1217 return
1218 except EnvironmentError as e:
1219 if e.errno != errno.ENOENT:
1220 raise
1221 log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
1222 log(log_message, level='INFO')
1223
1224
1225def meter_status():
1226 """Get the meter status, if running in the meter-status-changed hook."""
1227 return os.environ.get('JUJU_METER_STATUS')
1228
1229
1230def meter_info():
1231 """Get the meter status information, if running in the meter-status-changed
1232 hook."""
1233 return os.environ.get('JUJU_METER_INFO')
1234
1235
1236def iter_units_for_relation_name(relation_name):
1237 """Iterate through all units in a relation
1238
1239 Generator that iterates through all the units in a relation and yields
1240 a named tuple with rid and unit field names.
1241
1242 Usage:
1243 data = [(u.rid, u.unit)
1244 for u in iter_units_for_relation_name(relation_name)]
1245
1246 :param relation_name: string relation name
1247 :yield: Named Tuple with rid and unit field names
1248 """
1249 RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
1250 for rid in relation_ids(relation_name):
1251 for unit in related_units(rid):
1252 yield RelatedUnit(rid, unit)
1253
1254
1255def ingress_address(rid=None, unit=None):
1256 """
1257 Retrieve the ingress-address from a relation when available.
1258 Otherwise, return the private-address.
1259
1260 When used on the consuming side of the relation (unit is a remote
1261 unit), the ingress-address is the IP address that this unit needs
1262 to use to reach the provided service on the remote unit.
1263
1264 When used on the providing side of the relation (unit == local_unit()),
1265 the ingress-address is the IP address that is advertised to remote
1266 units on this relation. Remote units need to use this address to
1267 reach the local provided service on this unit.
1268
1269 Note that charms may document some other method to use in
1270 preference to the ingress_address(), such as an address provided
1271 on a different relation attribute or a service discovery mechanism.
1272 This allows charms to redirect inbound connections to their peers
1273 or different applications such as load balancers.
1274
1275 Usage:
1276 addresses = [ingress_address(rid=u.rid, unit=u.unit)
1277 for u in iter_units_for_relation_name(relation_name)]
1278
1279 :param rid: string relation id
1280 :param unit: string unit name
1281 :side effect: calls relation_get
1282 :return: string IP address
1283 """
1284 settings = relation_get(rid=rid, unit=unit)
1285 return (settings.get('ingress-address') or
1286 settings.get('private-address'))
1287
1288
1289def egress_subnets(rid=None, unit=None):
1290 """
1291 Retrieve the egress-subnets from a relation.
1292
1293 This function is to be used on the providing side of the
1294 relation, and provides the ranges of addresses that client
1295 connections may come from. The result is uninteresting on
1296 the consuming side of a relation (unit == local_unit()).
1297
1298 Returns a stable list of subnets in CIDR format.
1299 eg. ['192.168.1.0/24', '2001::F00F/128']
1300
1301 If egress-subnets is not available, falls back to using the published
1302 ingress-address, or finally private-address.
1303
1304 :param rid: string relation id
1305 :param unit: string unit name
1306 :side effect: calls relation_get
1307 :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
1308 """
1309 def _to_range(addr):
1310 if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
1311 addr += '/32'
1312 elif ':' in addr and '/' not in addr: # IPv6
1313 addr += '/128'
1314 return addr
1315
1316 settings = relation_get(rid=rid, unit=unit)
1317 if 'egress-subnets' in settings:
1318 return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
1319 if 'ingress-address' in settings:
1320 return [_to_range(settings['ingress-address'])]
1321 if 'private-address' in settings:
1322 return [_to_range(settings['private-address'])]
1323 return [] # Should never happen
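
Note for reviewers: the hookenv.py changes above come in via the charm-helpers sync. Below is a minimal sketch (not part of this diff) of how a charm's hooks.py drives the refreshed helpers; it assumes a Juju hook context with the synced charmhelpers tree on sys.path, and the port number and status messages are illustrative only.

    # Sketch only -- not from this merge proposal. Assumes the charmhelpers
    # tree synced here is importable and that this runs inside a Juju hook
    # context; the port and messages are illustrative.
    import sys

    from charmhelpers.core.hookenv import (
        Hooks,
        UnregisteredHookError,
        log,
        open_port,
        status_set,
    )

    hooks = Hooks()


    @hooks.hook('install', 'upgrade-charm')
    def install():
        log('installing squid reverse proxy')
        open_port(3128)                # shells out to `open-port 3128/TCP`


    @hooks.hook('config-changed')
    def config_changed():
        status_set('active', 'ready')  # falls back to juju-log on juju < 1.23


    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)    # dispatches on the hook's basename
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))

As the hunk above shows, Hooks.execute() now wraps the hook body in the _run_atstart()/_run_atexit() callbacks and treats sys.exit(0) as success.
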
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index ae36574..e9fd38a 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -1,3 +1,17 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
1"""Tools for working with the host system"""15"""Tools for working with the host system"""
2# Copyright 2012 Canonical Ltd.16# Copyright 2012 Canonical Ltd.
3#17#
@@ -6,60 +20,332 @@
6# Matthew Wedgwood <matthew.wedgwood@canonical.com>20# Matthew Wedgwood <matthew.wedgwood@canonical.com>
721
8import os22import os
23import re
9import pwd24import pwd
25import glob
10import grp26import grp
11import random27import random
12import string28import string
13import subprocess29import subprocess
14import hashlib30import hashlib
31import functools
32import itertools
33import six
1534
35from contextlib import contextmanager
16from collections import OrderedDict36from collections import OrderedDict
37from .hookenv import log, DEBUG, local_unit
38from .fstab import Fstab
39from charmhelpers.osplatform import get_platform
40
41__platform__ = get_platform()
42if __platform__ == "ubuntu":
43 from charmhelpers.core.host_factory.ubuntu import (
44 service_available,
45 add_new_group,
46 lsb_release,
47 cmp_pkgrevno,
48 CompareHostReleases,
49 ) # flake8: noqa -- ignore F401 for this import
50elif __platform__ == "centos":
51 from charmhelpers.core.host_factory.centos import (
52 service_available,
53 add_new_group,
54 lsb_release,
55 cmp_pkgrevno,
56 CompareHostReleases,
57 ) # flake8: noqa -- ignore F401 for this import
58
59UPDATEDB_PATH = '/etc/updatedb.conf'
60
61def service_start(service_name, **kwargs):
62 """Start a system service.
63
64 The specified service name is managed via the system level init system.
65 Some init systems (e.g. upstart) require that additional arguments be
66 provided in order to directly control service instances whereas other init
67 systems allow for addressing instances of a service directly by name (e.g.
68 systemd).
69
70 The kwargs allow for the additional parameters to be passed to underlying
71 init systems for those systems which require/allow for them. For example,
72 the ceph-osd upstart script requires the id parameter to be passed along
73 in order to identify which running daemon should be reloaded. The follow-
74 ing example stops the ceph-osd service for instance id=4:
75
76 service_stop('ceph-osd', id=4)
77
78 :param service_name: the name of the service to stop
79 :param **kwargs: additional parameters to pass to the init system when
80 managing services. These will be passed as key=value
81 parameters to the init system's commandline. kwargs
82 are ignored for systemd enabled systems.
83 """
84 return service('start', service_name, **kwargs)
85
86
87def service_stop(service_name, **kwargs):
88 """Stop a system service.
89
90 The specified service name is managed via the system level init system.
91 Some init systems (e.g. upstart) require that additional arguments be
92 provided in order to directly control service instances whereas other init
93 systems allow for addressing instances of a service directly by name (e.g.
94 systemd).
95
96 The kwargs allow for the additional parameters to be passed to underlying
97 init systems for those systems which require/allow for them. For example,
98 the ceph-osd upstart script requires the id parameter to be passed along
99 in order to identify which running daemon should be reloaded. The follow-
100 ing example stops the ceph-osd service for instance id=4:
101
102 service_stop('ceph-osd', id=4)
103
104 :param service_name: the name of the service to stop
105 :param **kwargs: additional parameters to pass to the init system when
106 managing services. These will be passed as key=value
107 parameters to the init system's commandline. kwargs
108 are ignored for systemd enabled systems.
109 """
110 return service('stop', service_name, **kwargs)
111
112
113def service_restart(service_name, **kwargs):
114 """Restart a system service.
115
116 The specified service name is managed via the system level init system.
117 Some init systems (e.g. upstart) require that additional arguments be
118 provided in order to directly control service instances whereas other init
119 systems allow for addressing instances of a service directly by name (e.g.
120 systemd).
121
122 The kwargs allow for the additional parameters to be passed to underlying
123 init systems for those systems which require/allow for them. For example,
124 the ceph-osd upstart script requires the id parameter to be passed along
125 in order to identify which running daemon should be restarted. The follow-
126 ing example restarts the ceph-osd service for instance id=4:
127
128 service_restart('ceph-osd', id=4)
17129
18from hookenv import log130 :param service_name: the name of the service to restart
131 :param **kwargs: additional parameters to pass to the init system when
132 managing services. These will be passed as key=value
133 parameters to the init system's commandline. kwargs
134 are ignored for init systems not allowing additional
135 parameters via the commandline (systemd).
136 """
137 return service('restart', service_name)
19138
20139
21def service_start(service_name):140def service_reload(service_name, restart_on_failure=False, **kwargs):
22 service('start', service_name)141 """Reload a system service, optionally falling back to restart if
142 reload fails.
23143
144 The specified service name is managed via the system level init system.
145 Some init systems (e.g. upstart) require that additional arguments be
146 provided in order to directly control service instances whereas other init
147 systems allow for addressing instances of a service directly by name (e.g.
148 systemd).
24149
25def service_stop(service_name):150 The kwargs allow for the additional parameters to be passed to underlying
26 service('stop', service_name)151 init systems for those systems which require/allow for them. For example,
152 the ceph-osd upstart script requires the id parameter to be passed along
153 in order to identify which running daemon should be reloaded. The follow-
154 ing example restarts the ceph-osd service for instance id=4:
27155
156 service_reload('ceph-osd', id=4)
28157
29def service_restart(service_name):158 :param service_name: the name of the service to reload
30 service('restart', service_name)159 :param restart_on_failure: boolean indicating whether to fallback to a
160 restart if the reload fails.
161 :param **kwargs: additional parameters to pass to the init system when
162 managing services. These will be passed as key=value
163 parameters to the init system's commandline. kwargs
164 are ignored for init systems not allowing additional
165 parameters via the commandline (systemd).
166 """
167 service_result = service('reload', service_name, **kwargs)
168 if not service_result and restart_on_failure:
169 service_result = service('restart', service_name, **kwargs)
170 return service_result
31171
32172
33def service_reload(service_name, restart_on_failure=False):173def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
34 if not service('reload', service_name) and restart_on_failure:174 **kwargs):
35 service('restart', service_name)175 """Pause a system service.
36176
177 Stop it, and prevent it from starting again at boot.
37178
38def service(action, service_name):179 :param service_name: the name of the service to pause
39 cmd = ['service', service_name, action]180 :param init_dir: path to the upstart init directory
181 :param initd_dir: path to the sysv init directory
182 :param **kwargs: additional parameters to pass to the init system when
183 managing services. These will be passed as key=value
184 parameters to the init system's commandline. kwargs
185 are ignored for init systems which do not support
186 key=value arguments via the commandline.
187 """
188 stopped = True
189 if service_running(service_name, **kwargs):
190 stopped = service_stop(service_name, **kwargs)
191 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
192 sysv_file = os.path.join(initd_dir, service_name)
193 if init_is_systemd():
194 service('disable', service_name)
195 service('mask', service_name)
196 elif os.path.exists(upstart_file):
197 override_path = os.path.join(
198 init_dir, '{}.override'.format(service_name))
199 with open(override_path, 'w') as fh:
200 fh.write("manual\n")
201 elif os.path.exists(sysv_file):
202 subprocess.check_call(["update-rc.d", service_name, "disable"])
203 else:
204 raise ValueError(
205 "Unable to detect {0} as SystemD, Upstart {1} or"
206 " SysV {2}".format(
207 service_name, upstart_file, sysv_file))
208 return stopped
209
210
211def service_resume(service_name, init_dir="/etc/init",
212 initd_dir="/etc/init.d", **kwargs):
213 """Resume a system service.
214
215 Reenable starting again at boot. Start the service.
216
217 :param service_name: the name of the service to resume
218 :param init_dir: the path to the init dir
219 :param initd_dir: the path to the initd dir
220 :param **kwargs: additional parameters to pass to the init system when
221 managing services. These will be passed as key=value
222 parameters to the init system's commandline. kwargs
223 are ignored for systemd enabled systems.
224 """
225 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
226 sysv_file = os.path.join(initd_dir, service_name)
227 if init_is_systemd():
228 service('unmask', service_name)
229 service('enable', service_name)
230 elif os.path.exists(upstart_file):
231 override_path = os.path.join(
232 init_dir, '{}.override'.format(service_name))
233 if os.path.exists(override_path):
234 os.unlink(override_path)
235 elif os.path.exists(sysv_file):
236 subprocess.check_call(["update-rc.d", service_name, "enable"])
237 else:
238 raise ValueError(
239 "Unable to detect {0} as SystemD, Upstart {1} or"
240 " SysV {2}".format(
241 service_name, upstart_file, sysv_file))
242 started = service_running(service_name, **kwargs)
243
244 if not started:
245 started = service_start(service_name, **kwargs)
246 return started
247
248
249def service(action, service_name, **kwargs):
250 """Control a system service.
251
252 :param action: the action to take on the service
253 :param service_name: the name of the service to perform the action on
254 :param **kwargs: additional params to be passed to the service command in
255 the form of key=value.
256 """
257 if init_is_systemd():
258 cmd = ['systemctl', action, service_name]
259 else:
260 cmd = ['service', service_name, action]
261 for key, value in six.iteritems(kwargs):
262 parameter = '%s=%s' % (key, value)
263 cmd.append(parameter)
40 return subprocess.call(cmd) == 0264 return subprocess.call(cmd) == 0
41265
42266
43def service_running(service):267_UPSTART_CONF = "/etc/init/{}.conf"
44 try:268_INIT_D_CONF = "/etc/init.d/{}"
45 output = subprocess.check_output(['service', service, 'status'])269
46 except subprocess.CalledProcessError:270
47 return False271def service_running(service_name, **kwargs):
272 """Determine whether a system service is running.
273
274 :param service_name: the name of the service
275 :param **kwargs: additional args to pass to the service command. This is
276 used to pass additional key=value arguments to the
277 service command line for managing specific instance
278 units (e.g. service ceph-osd status id=2). The kwargs
279 are ignored in systemd services.
280 """
281 if init_is_systemd():
282 return service('is-active', service_name)
48 else:283 else:
49 if ("start/running" in output or "is running" in output):284 if os.path.exists(_UPSTART_CONF.format(service_name)):
50 return True285 try:
51 else:286 cmd = ['status', service_name]
52 return False287 for key, value in six.iteritems(kwargs):
288 parameter = '%s=%s' % (key, value)
289 cmd.append(parameter)
290 output = subprocess.check_output(cmd,
291 stderr=subprocess.STDOUT).decode('UTF-8')
292 except subprocess.CalledProcessError:
293 return False
294 else:
295 # This works for upstart scripts where the 'service' command
296 # returns a consistent string to represent running
297 # 'start/running'
298 if ("start/running" in output or
299 "is running" in output or
300 "up and running" in output):
301 return True
302 elif os.path.exists(_INIT_D_CONF.format(service_name)):
303 # Check System V scripts init script return codes
304 return service('status', service_name)
305 return False
306
307
308SYSTEMD_SYSTEM = '/run/systemd/system'
53309
54310
55def adduser(username, password=None, shell='/bin/bash', system_user=False):311def init_is_systemd():
56 """Add a user"""312 """Return True if the host system uses systemd, False otherwise."""
313 if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
314 return False
315 return os.path.isdir(SYSTEMD_SYSTEM)
316
317
318def adduser(username, password=None, shell='/bin/bash',
319 system_user=False, primary_group=None,
320 secondary_groups=None, uid=None, home_dir=None):
321 """Add a user to the system.
322
323 Will log but otherwise succeed if the user already exists.
324
325 :param str username: Username to create
326 :param str password: Password for user; if ``None``, create a system user
327 :param str shell: The default shell for the user
328 :param bool system_user: Whether to create a login or system user
329 :param str primary_group: Primary group for user; defaults to username
330 :param list secondary_groups: Optional list of additional groups
331 :param int uid: UID for user being created
332 :param str home_dir: Home directory for user
333
334 :returns: The password database entry struct, as returned by `pwd.getpwnam`
335 """
57 try:336 try:
58 user_info = pwd.getpwnam(username)337 user_info = pwd.getpwnam(username)
59 log('user {0} already exists!'.format(username))338 log('user {0} already exists!'.format(username))
339 if uid:
340 user_info = pwd.getpwuid(int(uid))
341 log('user with uid {0} already exists!'.format(uid))
60 except KeyError:342 except KeyError:
61 log('creating user {0}'.format(username))343 log('creating user {0}'.format(username))
62 cmd = ['useradd']344 cmd = ['useradd']
345 if uid:
346 cmd.extend(['--uid', str(uid)])
347 if home_dir:
348 cmd.extend(['--home', str(home_dir)])
63 if system_user or password is None:349 if system_user or password is None:
64 cmd.append('--system')350 cmd.append('--system')
65 else:351 else:
@@ -68,32 +354,147 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
68 '--shell', shell,354 '--shell', shell,
69 '--password', password,355 '--password', password,
70 ])356 ])
357 if not primary_group:
358 try:
359 grp.getgrnam(username)
360 primary_group = username # avoid "group exists" error
361 except KeyError:
362 pass
363 if primary_group:
364 cmd.extend(['-g', primary_group])
365 if secondary_groups:
366 cmd.extend(['-G', ','.join(secondary_groups)])
71 cmd.append(username)367 cmd.append(username)
72 subprocess.check_call(cmd)368 subprocess.check_call(cmd)
73 user_info = pwd.getpwnam(username)369 user_info = pwd.getpwnam(username)
74 return user_info370 return user_info
75371
76372
373def user_exists(username):
374 """Check if a user exists"""
375 try:
376 pwd.getpwnam(username)
377 user_exists = True
378 except KeyError:
379 user_exists = False
380 return user_exists
381
382
383def uid_exists(uid):
384 """Check if a uid exists"""
385 try:
386 pwd.getpwuid(uid)
387 uid_exists = True
388 except KeyError:
389 uid_exists = False
390 return uid_exists
391
392
393def group_exists(groupname):
394 """Check if a group exists"""
395 try:
396 grp.getgrnam(groupname)
397 group_exists = True
398 except KeyError:
399 group_exists = False
400 return group_exists
401
402
403def gid_exists(gid):
404 """Check if a gid exists"""
405 try:
406 grp.getgrgid(gid)
407 gid_exists = True
408 except KeyError:
409 gid_exists = False
410 return gid_exists
411
412
413def add_group(group_name, system_group=False, gid=None):
414 """Add a group to the system
415
416 Will log but otherwise succeed if the group already exists.
417
418 :param str group_name: group to create
419 :param bool system_group: Create system group
420 :param int gid: GID for user being created
421
422 :returns: The password database entry struct, as returned by `grp.getgrnam`
423 """
424 try:
425 group_info = grp.getgrnam(group_name)
426 log('group {0} already exists!'.format(group_name))
427 if gid:
428 group_info = grp.getgrgid(gid)
429 log('group with gid {0} already exists!'.format(gid))
430 except KeyError:
431 log('creating group {0}'.format(group_name))
432 add_new_group(group_name, system_group, gid)
433 group_info = grp.getgrnam(group_name)
434 return group_info
435
436
77def add_user_to_group(username, group):437def add_user_to_group(username, group):
78 """Add a user to a group"""438 """Add a user to a group"""
79 cmd = [439 cmd = ['gpasswd', '-a', username, group]
80 'gpasswd', '-a',
81 username,
82 group
83 ]
84 log("Adding user {} to group {}".format(username, group))440 log("Adding user {} to group {}".format(username, group))
85 subprocess.check_call(cmd)441 subprocess.check_call(cmd)
86442
87443
88def rsync(from_path, to_path, flags='-r', options=None):444def chage(username, lastday=None, expiredate=None, inactive=None,
445 mindays=None, maxdays=None, root=None, warndays=None):
446 """Change user password expiry information
447
448 :param str username: User to update
449 :param str lastday: Set when password was changed in YYYY-MM-DD format
450 :param str expiredate: Set when user's account will no longer be
451 accessible in YYYY-MM-DD format.
452 -1 will remove an account expiration date.
453 :param str inactive: Set the number of days of inactivity after a password
454 has expired before the account is locked.
455 -1 will remove an account's inactivity.
456 :param str mindays: Set the minimum number of days between password
457 changes to MIN_DAYS.
458 0 indicates the password can be changed anytime.
459 :param str maxdays: Set the maximum number of days during which a
460 password is valid.
461 -1 as MAX_DAYS will remove checking maxdays
462 :param str root: Apply changes in the CHROOT_DIR directory
463 :param str warndays: Set the number of days of warning before a password
464 change is required
465 :raises subprocess.CalledProcessError: if call to chage fails
466 """
467 cmd = ['chage']
468 if root:
469 cmd.extend(['--root', root])
470 if lastday:
471 cmd.extend(['--lastday', lastday])
472 if expiredate:
473 cmd.extend(['--expiredate', expiredate])
474 if inactive:
475 cmd.extend(['--inactive', inactive])
476 if mindays:
477 cmd.extend(['--mindays', mindays])
478 if maxdays:
479 cmd.extend(['--maxdays', maxdays])
480 if warndays:
481 cmd.extend(['--warndays', warndays])
482 cmd.append(username)
483 subprocess.check_call(cmd)
484
485remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
486
487def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
89 """Replicate the contents of a path"""488 """Replicate the contents of a path"""
90 options = options or ['--delete', '--executability']489 options = options or ['--delete', '--executability']
91 cmd = ['/usr/bin/rsync', flags]490 cmd = ['/usr/bin/rsync', flags]
491 if timeout:
492 cmd = ['timeout', str(timeout)] + cmd
92 cmd.extend(options)493 cmd.extend(options)
93 cmd.append(from_path)494 cmd.append(from_path)
94 cmd.append(to_path)495 cmd.append(to_path)
95 log(" ".join(cmd))496 log(" ".join(cmd))
96 return subprocess.check_output(cmd).strip()497 return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
97498
98499
99def symlink(source, destination):500def symlink(source, destination):
@@ -108,66 +509,105 @@ def symlink(source, destination):
108 subprocess.check_call(cmd)509 subprocess.check_call(cmd)
109510
110511
111def mkdir(path, owner='root', group='root', perms=0555, force=False):512def mkdir(path, owner='root', group='root', perms=0o555, force=False):
112 """Create a directory"""513 """Create a directory"""
113 log("Making dir {} {}:{} {:o}".format(path, owner, group,514 log("Making dir {} {}:{} {:o}".format(path, owner, group,
114 perms))515 perms))
115 uid = pwd.getpwnam(owner).pw_uid516 uid = pwd.getpwnam(owner).pw_uid
116 gid = grp.getgrnam(group).gr_gid517 gid = grp.getgrnam(group).gr_gid
117 realpath = os.path.abspath(path)518 realpath = os.path.abspath(path)
118 if os.path.exists(realpath):519 path_exists = os.path.exists(realpath)
119 if force and not os.path.isdir(realpath):520 if path_exists and force:
521 if not os.path.isdir(realpath):
120 log("Removing non-directory file {} prior to mkdir()".format(path))522 log("Removing non-directory file {} prior to mkdir()".format(path))
121 os.unlink(realpath)523 os.unlink(realpath)
122 else:524 os.makedirs(realpath, perms)
525 elif not path_exists:
123 os.makedirs(realpath, perms)526 os.makedirs(realpath, perms)
124 os.chown(realpath, uid, gid)527 os.chown(realpath, uid, gid)
528 os.chmod(realpath, perms)
125529
126530
127def write_file(path, content, owner='root', group='root', perms=0444):531def write_file(path, content, owner='root', group='root', perms=0o444):
128 """Create or overwrite a file with the contents of a string"""532 """Create or overwrite a file with the contents of a byte string."""
129 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
130 uid = pwd.getpwnam(owner).pw_uid533 uid = pwd.getpwnam(owner).pw_uid
131 gid = grp.getgrnam(group).gr_gid534 gid = grp.getgrnam(group).gr_gid
132 with open(path, 'w') as target:535 # lets see if we can grab the file and compare the context, to avoid doing
133 os.fchown(target.fileno(), uid, gid)536 # a write.
134 os.fchmod(target.fileno(), perms)537 existing_content = None
135 target.write(content)538 existing_uid, existing_gid = None, None
539 try:
540 with open(path, 'rb') as target:
541 existing_content = target.read()
542 stat = os.stat(path)
543 existing_uid, existing_gid = stat.st_uid, stat.st_gid
544 except:
545 pass
546 if content != existing_content:
547 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
548 level=DEBUG)
549 with open(path, 'wb') as target:
550 os.fchown(target.fileno(), uid, gid)
551 os.fchmod(target.fileno(), perms)
552 if six.PY3 and isinstance(content, six.string_types):
553 content = content.encode('UTF-8')
554 target.write(content)
555 return
556 # the contents were the same, but we might still need to change the
557 # ownership.
558 if existing_uid != uid:
559 log("Changing uid on already existing content: {} -> {}"
560 .format(existing_uid, uid), level=DEBUG)
561 os.chown(path, uid, -1)
562 if existing_gid != gid:
563 log("Changing gid on already existing content: {} -> {}"
564 .format(existing_gid, gid), level=DEBUG)
565 os.chown(path, -1, gid)
566
567
568def fstab_remove(mp):
569 """Remove the given mountpoint entry from /etc/fstab"""
570 return Fstab.remove_by_mountpoint(mp)
136571
137572
138def mount(device, mountpoint, options=None, persist=False):573def fstab_add(dev, mp, fs, options=None):
139 '''Mount a filesystem'''574 """Adds the given device entry to the /etc/fstab file"""
575 return Fstab.add(dev, mp, fs, options=options)
576
577
578def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
579 """Mount a filesystem at a particular mountpoint"""
140 cmd_args = ['mount']580 cmd_args = ['mount']
141 if options is not None:581 if options is not None:
142 cmd_args.extend(['-o', options])582 cmd_args.extend(['-o', options])
143 cmd_args.extend([device, mountpoint])583 cmd_args.extend([device, mountpoint])
144 try:584 try:
145 subprocess.check_output(cmd_args)585 subprocess.check_output(cmd_args)
146 except subprocess.CalledProcessError, e:586 except subprocess.CalledProcessError as e:
147 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))587 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
148 return False588 return False
589
149 if persist:590 if persist:
150 # TODO: update fstab591 return fstab_add(device, mountpoint, filesystem, options=options)
151 pass
152 return True592 return True
153593
154594
155def umount(mountpoint, persist=False):595def umount(mountpoint, persist=False):
156 '''Unmount a filesystem'''596 """Unmount a filesystem"""
157 cmd_args = ['umount', mountpoint]597 cmd_args = ['umount', mountpoint]
158 try:598 try:
159 subprocess.check_output(cmd_args)599 subprocess.check_output(cmd_args)
160 except subprocess.CalledProcessError, e:600 except subprocess.CalledProcessError as e:
161 log('Error unmounting {}\n{}'.format(mountpoint, e.output))601 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
162 return False602 return False
603
163 if persist:604 if persist:
164 # TODO: update fstab605 return fstab_remove(mountpoint)
165 pass
166 return True606 return True
167607
168608
169def mounts():609def mounts():
170 '''List of all mounted volumes as [[mountpoint,device],[...]]'''610 """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
171 with open('/proc/mounts') as f:611 with open('/proc/mounts') as f:
172 # [['/mount/point','/dev/path'],[...]]612 # [['/mount/point','/dev/path'],[...]]
173 system_mounts = [m[1::-1] for m in [l.strip().split()613 system_mounts = [m[1::-1] for m in [l.strip().split()
@@ -175,65 +615,428 @@ def mounts():
175 return system_mounts615 return system_mounts
176616
177617
178def file_hash(path):618def fstab_mount(mountpoint):
179 ''' Generate a md5 hash of the contents of 'path' or None if not found '''619 """Mount filesystem using fstab"""
620 cmd_args = ['mount', mountpoint]
621 try:
622 subprocess.check_output(cmd_args)
623 except subprocess.CalledProcessError as e:
624 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
625 return False
626 return True
627
628
629def file_hash(path, hash_type='md5'):
630 """Generate a hash checksum of the contents of 'path' or None if not found.
631
632 :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
633 such as md5, sha1, sha256, sha512, etc.
634 """
180 if os.path.exists(path):635 if os.path.exists(path):
181 h = hashlib.md5()636 h = getattr(hashlib, hash_type)()
182 with open(path, 'r') as source:637 with open(path, 'rb') as source:
183 h.update(source.read()) # IGNORE:E1101 - it does have update638 h.update(source.read())
184 return h.hexdigest()639 return h.hexdigest()
185 else:640 else:
186 return None641 return None
187642
188643
189def restart_on_change(restart_map):644def path_hash(path):
190 ''' Restart services based on configuration files changing645 """Generate a hash checksum of all files matching 'path'. Standard
646 wildcards like '*' and '?' are supported, see documentation for the 'glob'
647 module for more information.
648
649 :return: dict: A { filename: hash } dictionary for all matched files.
650 Empty if none found.
651 """
652 return {
653 filename: file_hash(filename)
654 for filename in glob.iglob(path)
655 }
656
191657
192 This function is used a decorator, for example658def check_hash(path, checksum, hash_type='md5'):
659 """Validate a file using a cryptographic checksum.
660
661 :param str checksum: Value of the checksum used to validate the file.
662 :param str hash_type: Hash algorithm used to generate `checksum`.
663 Can be any hash algorithm supported by :mod:`hashlib`,
664 such as md5, sha1, sha256, sha512, etc.
665 :raises ChecksumError: If the file fails the checksum
666
667 """
668 actual_checksum = file_hash(path, hash_type)
669 if checksum != actual_checksum:
670 raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
671
672
673class ChecksumError(ValueError):
674 """A class derived from Value error to indicate the checksum failed."""
675 pass
676
677
678def restart_on_change(restart_map, stopstart=False, restart_functions=None):
679 """Restart services based on configuration files changing
680
681 This function is used as a decorator, for example::
193682
194 @restart_on_change({683 @restart_on_change({
195 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]684 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
685 '/etc/apache/sites-enabled/*': [ 'apache2' ]
196 })686 })
197 def ceph_client_changed():687 def config_changed():
198 ...688 pass # your code here
199689
200 In this example, the cinder-api and cinder-volume services690 In this example, the cinder-api and cinder-volume services
201 would be restarted if /etc/ceph/ceph.conf is changed by the691 would be restarted if /etc/ceph/ceph.conf is changed by the
202 ceph_client_changed function.692 ceph_client_changed function. The apache2 service would be
203 '''693 restarted if any file matching the pattern got changed, created
694 or removed. Standard wildcards are supported, see documentation
695 for the 'glob' module for more information.
696
697 @param restart_map: {path_file_name: [service_name, ...]
698 @param stopstart: DEFAULT false; whether to stop, start OR restart
699 @param restart_functions: nonstandard functions to use to restart services
700 {svc: func, ...}
701 @returns result from decorated function
702 """
204 def wrap(f):703 def wrap(f):
205 def wrapped_f(*args):704 @functools.wraps(f)
206 checksums = {}705 def wrapped_f(*args, **kwargs):
207 for path in restart_map:706 return restart_on_change_helper(
208 checksums[path] = file_hash(path)707 (lambda: f(*args, **kwargs)), restart_map, stopstart,
209 f(*args)708 restart_functions)
210 restarts = []
211 for path in restart_map:
212 if checksums[path] != file_hash(path):
213 restarts += restart_map[path]
214 for service_name in list(OrderedDict.fromkeys(restarts)):
215 service('restart', service_name)
216 return wrapped_f709 return wrapped_f
217 return wrap710 return wrap
218711
219712
220def lsb_release():713def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
221 '''Return /etc/lsb-release in a dict'''714 restart_functions=None):
222 d = {}715 """Helper function to perform the restart_on_change function.
223 with open('/etc/lsb-release', 'r') as lsb:716
224 for l in lsb:717 This is provided for decorators to restart services if files described
225 k, v = l.split('=')718 in the restart_map have changed after an invocation of lambda_f().
226 d[k.strip()] = v.strip()719
227 return d720 @param lambda_f: function to call.
721 @param restart_map: {file: [service, ...]}
722 @param stopstart: whether to stop, start or restart a service
723 @param restart_functions: nonstandard functions to use to restart services
724 {svc: func, ...}
725 @returns result of lambda_f()
726 """
727 if restart_functions is None:
728 restart_functions = {}
729 checksums = {path: path_hash(path) for path in restart_map}
730 r = lambda_f()
731 # create a list of lists of the services to restart
732 restarts = [restart_map[path]
733 for path in restart_map
734 if path_hash(path) != checksums[path]]
735 # create a flat list of ordered services without duplicates from lists
736 services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
737 if services_list:
738 actions = ('stop', 'start') if stopstart else ('restart',)
739 for service_name in services_list:
740 if service_name in restart_functions:
741 restart_functions[service_name](service_name)
742 else:
743 for action in actions:
744 service(action, service_name)
745 return r
228746
229747
230def pwgen(length=None):748def pwgen(length=None):
231 '''Generate a random pasword.'''749 """Generate a random pasword."""
232 if length is None:750 if length is None:
751 # A random length is ok to use a weak PRNG
233 length = random.choice(range(35, 45))752 length = random.choice(range(35, 45))
234 alphanumeric_chars = [753 alphanumeric_chars = [
235 l for l in (string.letters + string.digits)754 l for l in (string.ascii_letters + string.digits)
236 if l not in 'l0QD1vAEIOUaeiou']755 if l not in 'l0QD1vAEIOUaeiou']
756 # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
757 # actual password
758 random_generator = random.SystemRandom()
237 random_chars = [759 random_chars = [
238 random.choice(alphanumeric_chars) for _ in range(length)]760 random_generator.choice(alphanumeric_chars) for _ in range(length)]
239 return(''.join(random_chars))761 return(''.join(random_chars))
762
763
764def is_phy_iface(interface):
765 """Returns True if interface is not virtual, otherwise False."""
766 if interface:
767 sys_net = '/sys/class/net'
768 if os.path.isdir(sys_net):
769 for iface in glob.glob(os.path.join(sys_net, '*')):
770 if '/virtual/' in os.path.realpath(iface):
771 continue
772
773 if interface == os.path.basename(iface):
774 return True
775
776 return False
777
778
779def get_bond_master(interface):
780 """Returns bond master if interface is bond slave otherwise None.
781
782 NOTE: the provided interface is expected to be physical
783 """
784 if interface:
785 iface_path = '/sys/class/net/%s' % (interface)
786 if os.path.exists(iface_path):
787 if '/virtual/' in os.path.realpath(iface_path):
788 return None
789
790 master = os.path.join(iface_path, 'master')
791 if os.path.exists(master):
792 master = os.path.realpath(master)
793 # make sure it is a bond master
794 if os.path.exists(os.path.join(master, 'bonding')):
795 return os.path.basename(master)
796
797 return None
798
799
800def list_nics(nic_type=None):
801 """Return a list of nics of given type(s)"""
802 if isinstance(nic_type, six.string_types):
803 int_types = [nic_type]
804 else:
805 int_types = nic_type
806
807 interfaces = []
808 if nic_type:
809 for int_type in int_types:
810 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
811 ip_output = subprocess.check_output(cmd).decode('UTF-8')
812 ip_output = ip_output.split('\n')
813 ip_output = (line for line in ip_output if line)
814 for line in ip_output:
815 if line.split()[1].startswith(int_type):
816 matched = re.search('.*: (' + int_type +
817 r'[0-9]+\.[0-9]+)@.*', line)
818 if matched:
819 iface = matched.groups()[0]
820 else:
821 iface = line.split()[1].replace(":", "")
822
823 if iface not in interfaces:
824 interfaces.append(iface)
825 else:
826 cmd = ['ip', 'a']
827 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
828 ip_output = (line.strip() for line in ip_output if line)
829
830 key = re.compile('^[0-9]+:\s+(.+):')
831 for line in ip_output:
832 matched = re.search(key, line)
833 if matched:
834 iface = matched.group(1)
835 iface = iface.partition("@")[0]
836 if iface not in interfaces:
837 interfaces.append(iface)
838
839 return interfaces
840
841
842def set_nic_mtu(nic, mtu):
843 """Set the Maximum Transmission Unit (MTU) on a network interface."""
844 cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
845 subprocess.check_call(cmd)
846
847
848def get_nic_mtu(nic):
849 """Return the Maximum Transmission Unit (MTU) for a network interface."""
850 cmd = ['ip', 'addr', 'show', nic]
851 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
852 mtu = ""
853 for line in ip_output:
854 words = line.split()
855 if 'mtu' in words:
856 mtu = words[words.index("mtu") + 1]
857 return mtu
858
859
860def get_nic_hwaddr(nic):
861 """Return the Media Access Control (MAC) for a network interface."""
862 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
863 ip_output = subprocess.check_output(cmd).decode('UTF-8')
864 hwaddr = ""
865 words = ip_output.split()
866 if 'link/ether' in words:
867 hwaddr = words[words.index('link/ether') + 1]
868 return hwaddr
869
870
871@contextmanager
872def chdir(directory):
873 """Change the current working directory to a different directory for a code
874 block and return the previous directory after the block exits. Useful to
875 run commands from a specificed directory.
876
877 :param str directory: The directory path to change to for this context.
878 """
879 cur = os.getcwd()
880 try:
881 yield os.chdir(directory)
882 finally:
883 os.chdir(cur)
884
885
886def chownr(path, owner, group, follow_links=True, chowntopdir=False):
887 """Recursively change user and group ownership of files and directories
888 in given path. Doesn't chown path itself by default, only its children.
889
890 :param str path: The string path to start changing ownership.
891 :param str owner: The owner string to use when looking up the uid.
892 :param str group: The group string to use when looking up the gid.
893 :param bool follow_links: Also follow and chown links if True
894 :param bool chowntopdir: Also chown path itself if True
895 """
896 uid = pwd.getpwnam(owner).pw_uid
897 gid = grp.getgrnam(group).gr_gid
898 if follow_links:
899 chown = os.chown
900 else:
901 chown = os.lchown
902
903 if chowntopdir:
904 broken_symlink = os.path.lexists(path) and not os.path.exists(path)
905 if not broken_symlink:
906 chown(path, uid, gid)
907 for root, dirs, files in os.walk(path, followlinks=follow_links):
908 for name in dirs + files:
909 full = os.path.join(root, name)
910 broken_symlink = os.path.lexists(full) and not os.path.exists(full)
911 if not broken_symlink:
912 chown(full, uid, gid)
913
914
915def lchownr(path, owner, group):
916 """Recursively change user and group ownership of files and directories
917 in a given path, not following symbolic links. See the documentation for
918 'os.lchown' for more information.
919
920 :param str path: The string path to start changing ownership.
921 :param str owner: The owner string to use when looking up the uid.
922 :param str group: The group string to use when looking up the gid.
923 """
924 chownr(path, owner, group, follow_links=False)
925
926
927def owner(path):
928 """Returns a tuple containing the username & groupname owning the path.
929
930 :param str path: the string path to retrieve the ownership
931 :return tuple(str, str): A (username, groupname) tuple containing the
932 name of the user and group owning the path.
933 :raises OSError: if the specified path does not exist
934 """
935 stat = os.stat(path)
936 username = pwd.getpwuid(stat.st_uid)[0]
937 groupname = grp.getgrgid(stat.st_gid)[0]
938 return username, groupname
939
940
941def get_total_ram():
942 """The total amount of system RAM in bytes.
943
944 This is what is reported by the OS, and may be overcommitted when
945 there are multiple containers hosted on the same machine.
946 """
947 with open('/proc/meminfo', 'r') as f:
948 for line in f.readlines():
949 if line:
950 key, value, unit = line.split()
951 if key == 'MemTotal:':
952 assert unit == 'kB', 'Unknown unit'
953 return int(value) * 1024 # Classic, not KiB.
954 raise NotImplementedError()
955
956
957UPSTART_CONTAINER_TYPE = '/run/container_type'
958
959
960def is_container():
961 """Determine whether unit is running in a container
962
963 @return: boolean indicating if unit is in a container
964 """
965 if init_is_systemd():
966 # Detect using systemd-detect-virt
967 return subprocess.call(['systemd-detect-virt',
968 '--container']) == 0
969 else:
970 # Detect using upstart container file marker
971 return os.path.exists(UPSTART_CONTAINER_TYPE)
972
973
974def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
975 """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
976
977 This method has no effect if the path specified by updatedb_path does not
978 exist or is not a file.
979
980 @param path: string the path to add to the updatedb.conf PRUNEPATHS value
981 @param updatedb_path: the path the updatedb.conf file
982 """
983 if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
984 # If the updatedb.conf file doesn't exist then don't attempt to update
985 # the file as the package providing mlocate may not be installed on
986 # the local system
987 return
988
989 with open(updatedb_path, 'r+') as f_id:
990 updatedb_text = f_id.read()
991 output = updatedb(updatedb_text, path)
992 f_id.seek(0)
993 f_id.write(output)
994 f_id.truncate()
995
996
997def updatedb(updatedb_text, new_path):
998 lines = [line for line in updatedb_text.split("\n")]
999 for i, line in enumerate(lines):
1000 if line.startswith("PRUNEPATHS="):
1001 paths_line = line.split("=")[1].replace('"', '')
1002 paths = paths_line.split(" ")
1003 if new_path not in paths:
1004 paths.append(new_path)
1005 lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
1006 output = "\n".join(lines)
1007 return output
1008
1009
1010def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
1011 """ Modulo distribution
1012
1013 This helper uses the unit number, a modulo value and a constant wait time
1014 to produce a calculated wait time distribution. This is useful in large
1015 scale deployments to distribute load during an expensive operation such as
1016 service restarts.
1017
1018 If you have 1000 nodes that need to restart 100 at a time 1 minute at a
1019 time:
1020
1021 time.wait(modulo_distribution(modulo=100, wait=60))
1022 restart()
1023
1024 If you need restarts to happen serially set modulo to the exact number of
1025 nodes and set a high constant wait time:
1026
1027 time.wait(modulo_distribution(modulo=10, wait=120))
1028 restart()
1029
1030 @param modulo: int The modulo number creates the group distribution
1031 @param wait: int The constant time wait value
1032 @param non_zero_wait: boolean Override unit % modulo == 0,
1033 return modulo * wait. Used to avoid collisions with
1034 leader nodes which are often given priority.
1035 @return: int Calculated time to wait for unit operation
1036 """
1037 unit_number = int(local_unit().split('/')[1])
1038 calculated_wait_time = (unit_number % modulo) * wait
1039 if non_zero_wait and calculated_wait_time == 0:
1040 return modulo * wait
1041 else:
1042 return calculated_wait_time
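As an illustrative aside, the wait computed by modulo_distribution() is normally consumed with time.sleep() before the expensive operation (the docstring's time.wait appears to be shorthand for this); the modulo/wait values and restart() below are examples only.

# Illustrative sketch only; modulo/wait values and restart() are examples.
import time

from charmhelpers.core.host import modulo_distribution


def restart():
    pass  # stand-in for the charm's actual restart action


# Units whose number is 0 modulo 100 act immediately; the rest sleep
# (unit_number % 100) * 60 seconds, spreading restarts across the fleet.
time.sleep(modulo_distribution(modulo=100, wait=60))
restart()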
diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/__init__.py
diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 0000000..7781a39
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,72 @@
1import subprocess
2import yum
3import os
4
5from charmhelpers.core.strutils import BasicStringComparator
6
7
8class CompareHostReleases(BasicStringComparator):
9 """Provide comparisons of Host releases.
10
11 Use in the form of
12
13 if CompareHostReleases(release) > 'trusty':
14 # do something with mitaka
15 """
16
17 def __init__(self, item):
18 raise NotImplementedError(
19 "CompareHostReleases() is not implemented for CentOS")
20
21
22def service_available(service_name):
23 # """Determine whether a system service is available."""
24 if os.path.isdir('/run/systemd/system'):
25 cmd = ['systemctl', 'is-enabled', service_name]
26 else:
27 cmd = ['service', service_name, 'is-enabled']
28 return subprocess.call(cmd) == 0
29
30
31def add_new_group(group_name, system_group=False, gid=None):
32 cmd = ['groupadd']
33 if gid:
34 cmd.extend(['--gid', str(gid)])
35 if system_group:
36 cmd.append('-r')
37 cmd.append(group_name)
38 subprocess.check_call(cmd)
39
40
41def lsb_release():
42 """Return /etc/os-release in a dict."""
43 d = {}
44 with open('/etc/os-release', 'r') as lsb:
45 for l in lsb:
46 s = l.split('=')
47 if len(s) != 2:
48 continue
49 d[s[0].strip()] = s[1].strip()
50 return d
51
52
53def cmp_pkgrevno(package, revno, pkgcache=None):
54 """Compare supplied revno with the revno of the installed package.
55
56 * 1 => Installed revno is greater than supplied arg
57 * 0 => Installed revno is the same as supplied arg
58 * -1 => Installed revno is less than supplied arg
59
60 This function imports YumBase function if the pkgcache argument
61 is None.
62 """
63 if not pkgcache:
64 y = yum.YumBase()
65 packages = y.doPackageLists()
66 pkgcache = {i.Name: i.version for i in packages['installed']}
67 pkg = pkgcache[package]
68 if pkg > revno:
69 return 1
70 if pkg < revno:
71 return -1
72 return 0
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
new file mode 100644
index 0000000..99451b5
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -0,0 +1,90 @@
1import subprocess
2
3from charmhelpers.core.strutils import BasicStringComparator
4
5
6UBUNTU_RELEASES = (
7 'lucid',
8 'maverick',
9 'natty',
10 'oneiric',
11 'precise',
12 'quantal',
13 'raring',
14 'saucy',
15 'trusty',
16 'utopic',
17 'vivid',
18 'wily',
19 'xenial',
20 'yakkety',
21 'zesty',
22 'artful',
23 'bionic',
24)
25
26
27class CompareHostReleases(BasicStringComparator):
28 """Provide comparisons of Ubuntu releases.
29
30 Use in the form of
31
32 if CompareHostReleases(release) > 'trusty':
33 # do something with mitaka
34 """
35 _list = UBUNTU_RELEASES
36
37
38def service_available(service_name):
39 """Determine whether a system service is available"""
40 try:
41 subprocess.check_output(
42 ['service', service_name, 'status'],
43 stderr=subprocess.STDOUT).decode('UTF-8')
44 except subprocess.CalledProcessError as e:
45 return b'unrecognized service' not in e.output
46 else:
47 return True
48
49
50def add_new_group(group_name, system_group=False, gid=None):
51 cmd = ['addgroup']
52 if gid:
53 cmd.extend(['--gid', str(gid)])
54 if system_group:
55 cmd.append('--system')
56 else:
57 cmd.extend([
58 '--group',
59 ])
60 cmd.append(group_name)
61 subprocess.check_call(cmd)
62
63
64def lsb_release():
65 """Return /etc/lsb-release in a dict"""
66 d = {}
67 with open('/etc/lsb-release', 'r') as lsb:
68 for l in lsb:
69 k, v = l.split('=')
70 d[k.strip()] = v.strip()
71 return d
72
73
74def cmp_pkgrevno(package, revno, pkgcache=None):
75 """Compare supplied revno with the revno of the installed package.
76
77 * 1 => Installed revno is greater than supplied arg
78 * 0 => Installed revno is the same as supplied arg
79 * -1 => Installed revno is less than supplied arg
80
81 This function imports apt_cache function from charmhelpers.fetch if
82 the pkgcache argument is None. Be sure to add charmhelpers.fetch if
83 you call this function, or pass an apt_pkg.Cache() instance.
84 """
85 import apt_pkg
86 if not pkgcache:
87 from charmhelpers.fetch import apt_cache
88 pkgcache = apt_cache()
89 pkg = pkgcache[package]
90 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
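A short, illustrative sketch of these Ubuntu helpers; the release comparison and the package/version are example values only.

# Illustrative sketch only; 'bionic' and the package/version are examples.
from charmhelpers.core.host_factory.ubuntu import (
    CompareHostReleases,
    cmp_pkgrevno,
    lsb_release,
)

release = lsb_release()['DISTRIB_CODENAME']
if CompareHostReleases(release) >= 'bionic':
    pass  # bionic-or-later specific handling goes here

if cmp_pkgrevno('squid', '3.5.12') >= 0:
    pass  # installed squid is at least version 3.5.12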
diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000..54b5b5e
--- /dev/null
+++ b/hooks/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
1# -*- coding: utf-8 -*-
2
3# Copyright 2014-2015 Canonical Limited.
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17import yaml
18from charmhelpers.core import fstab
19from charmhelpers.core import sysctl
20from charmhelpers.core.host import (
21 add_group,
22 add_user_to_group,
23 fstab_mount,
24 mkdir,
25)
26from charmhelpers.core.strutils import bytes_from_string
27from subprocess import check_output
28
29
30def hugepage_support(user, group='hugetlb', nr_hugepages=256,
31 max_map_count=65536, mnt_point='/run/hugepages/kvm',
32 pagesize='2MB', mount=True, set_shmmax=False):
33 """Enable hugepages on system.
34
35 Args:
36 user (str) -- Username to allow access to hugepages to
37 group (str) -- Group name to own hugepages
38 nr_hugepages (int) -- Number of pages to reserve
39 max_map_count (int) -- Number of Virtual Memory Areas a process can own
40 mnt_point (str) -- Directory to mount hugepages on
41 pagesize (str) -- Size of hugepages
42 mount (bool) -- Whether to Mount hugepages
43 """
44 group_info = add_group(group)
45 gid = group_info.gr_gid
46 add_user_to_group(user, group)
47 if max_map_count < 2 * nr_hugepages:
48 max_map_count = 2 * nr_hugepages
49 sysctl_settings = {
50 'vm.nr_hugepages': nr_hugepages,
51 'vm.max_map_count': max_map_count,
52 'vm.hugetlb_shm_group': gid,
53 }
54 if set_shmmax:
55 shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
56 shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
57 if shmmax_minsize > shmmax_current:
58 sysctl_settings['kernel.shmmax'] = shmmax_minsize
59 sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
60 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
61 lfstab = fstab.Fstab()
62 fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
63 if fstab_entry:
64 lfstab.remove_entry(fstab_entry)
65 entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
66 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
67 lfstab.add_entry(entry)
68 if mount:
69 fstab_mount(mnt_point)
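For illustration, a sketch of enabling hugepages with the helper above; the user, group and sizes are example values.

# Illustrative sketch only; user, group and sizes are example values.
# Writes /etc/sysctl.d/10-hugepage.conf, adds a hugetlbfs entry to fstab
# for the mount point and (with mount=True) mounts it right away.
from charmhelpers.core.hugepage import hugepage_support

hugepage_support(
    user='libvirt-qemu',
    group='hugetlb',
    nr_hugepages=512,
    mnt_point='/run/hugepages/kvm',
    pagesize='2MB',
    mount=True,
)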
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py
new file mode 100644
index 0000000..2d40452
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17
18import re
19import subprocess
20
21from charmhelpers.osplatform import get_platform
22from charmhelpers.core.hookenv import (
23 log,
24 INFO
25)
26
27__platform__ = get_platform()
28if __platform__ == "ubuntu":
29 from charmhelpers.core.kernel_factory.ubuntu import (
30 persistent_modprobe,
31 update_initramfs,
32 ) # flake8: noqa -- ignore F401 for this import
33elif __platform__ == "centos":
34 from charmhelpers.core.kernel_factory.centos import (
35 persistent_modprobe,
36 update_initramfs,
37 ) # flake8: noqa -- ignore F401 for this import
38
39__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
40
41
42def modprobe(module, persist=True):
43 """Load a kernel module and configure for auto-load on reboot."""
44 cmd = ['modprobe', module]
45
46 log('Loading kernel module %s' % module, level=INFO)
47
48 subprocess.check_call(cmd)
49 if persist:
50 persistent_modprobe(module)
51
52
53def rmmod(module, force=False):
54 """Remove a module from the linux kernel"""
55 cmd = ['rmmod']
56 if force:
57 cmd.append('-f')
58 cmd.append(module)
59 log('Removing kernel module %s' % module, level=INFO)
60 return subprocess.check_call(cmd)
61
62
63def lsmod():
64 """Shows what kernel modules are currently loaded"""
65 return subprocess.check_output(['lsmod'],
66 universal_newlines=True)
67
68
69def is_module_loaded(module):
70 """Checks if a kernel module is already loaded"""
71 matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
72 return len(matches) > 0
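A brief, illustrative sketch of the kernel helpers above; 'bonding' is just an example module name.

# Illustrative sketch only; 'bonding' is an example module name.
from charmhelpers.core.kernel import is_module_loaded, modprobe, rmmod

if not is_module_loaded('bonding'):
    # Loads the module now and records it for auto-load on reboot
    # (/etc/modules on Ubuntu, /etc/rc.modules on CentOS).
    modprobe('bonding', persist=True)

# ...and later, if it is no longer needed:
rmmod('bonding')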
diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/__init__.py
diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 0000000..1c402c1
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
1import subprocess
2import os
3
4
5def persistent_modprobe(module):
6 """Load a kernel module and configure for auto-load on reboot."""
7 if not os.path.exists('/etc/rc.modules'):
8 open('/etc/rc.modules', 'a')
9 os.chmod('/etc/rc.modules', 111)
10 with open('/etc/rc.modules', 'r+') as modules:
11 if module not in modules.read():
12 modules.write('modprobe %s\n' % module)
13
14
15def update_initramfs(version='all'):
16 """Updates an initramfs image."""
17 return subprocess.check_call(["dracut", "-f", version])
diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 0000000..3de372f
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
1import subprocess
2
3
4def persistent_modprobe(module):
5 """Load a kernel module and configure for auto-load on reboot."""
6 with open('/etc/modules', 'r+') as modules:
7 if module not in modules.read():
8 modules.write(module + "\n")
9
10
11def update_initramfs(version='all'):
12 """Updates an initramfs image."""
13 return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000..61fd074
--- /dev/null
+++ b/hooks/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from .base import * # NOQA
16from .helpers import * # NOQA
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
new file mode 100644
index 0000000..179ad4f
--- /dev/null
+++ b/hooks/charmhelpers/core/services/base.py
@@ -0,0 +1,362 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import os
16import json
17from inspect import getargspec
18from collections import Iterable, OrderedDict
19
20from charmhelpers.core import host
21from charmhelpers.core import hookenv
22
23
24__all__ = ['ServiceManager', 'ManagerCallback',
25 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
26 'service_restart', 'service_stop']
27
28
29class ServiceManager(object):
30 def __init__(self, services=None):
31 """
32 Register a list of services, given their definitions.
33
34 Service definitions are dicts in the following formats (all keys except
35 'service' are optional)::
36
37 {
38 "service": <service name>,
39 "required_data": <list of required data contexts>,
40 "provided_data": <list of provided data contexts>,
41 "data_ready": <one or more callbacks>,
42 "data_lost": <one or more callbacks>,
43 "start": <one or more callbacks>,
44 "stop": <one or more callbacks>,
45 "ports": <list of ports to manage>,
46 }
47
48 The 'required_data' list should contain dicts of required data (or
49 dependency managers that act like dicts and know how to collect the data).
50 Only when all items in the 'required_data' list are populated are the list
51 of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
52 information.
53
54 The 'provided_data' list should contain relation data providers, most likely
55 a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
56 that will indicate a set of data to set on a given relation.
57
58 The 'data_ready' value should be either a single callback, or a list of
59 callbacks, to be called when all items in 'required_data' pass `is_ready()`.
60 Each callback will be called with the service name as the only parameter.
61 After all of the 'data_ready' callbacks are called, the 'start' callbacks
62 are fired.
63
64 The 'data_lost' value should be either a single callback, or a list of
65 callbacks, to be called when a 'required_data' item no longer passes
66 `is_ready()`. Each callback will be called with the service name as the
67 only parameter. After all of the 'data_lost' callbacks are called,
68 the 'stop' callbacks are fired.
69
70 The 'start' value should be either a single callback, or a list of
71 callbacks, to be called when starting the service, after the 'data_ready'
72 callbacks are complete. Each callback will be called with the service
73 name as the only parameter. This defaults to
74 `[host.service_start, services.open_ports]`.
75
76 The 'stop' value should be either a single callback, or a list of
77 callbacks, to be called when stopping the service. If the service is
78 being stopped because it no longer has all of its 'required_data', this
79 will be called after all of the 'data_lost' callbacks are complete.
80 Each callback will be called with the service name as the only parameter.
81 This defaults to `[services.close_ports, host.service_stop]`.
82
83 The 'ports' value should be a list of ports to manage. The default
84 'start' handler will open the ports after the service is started,
85 and the default 'stop' handler will close the ports prior to stopping
86 the service.
87
88
89 Examples:
90
91 The following registers an Upstart service called bingod that depends on
92 a mongodb relation and which runs a custom `db_migrate` function prior to
93 restarting the service, and a Runit service called spadesd::
94
95 manager = services.ServiceManager([
96 {
97 'service': 'bingod',
98 'ports': [80, 443],
99 'required_data': [MongoRelation(), config(), {'my': 'data'}],
100 'data_ready': [
101 services.template(source='bingod.conf'),
102 services.template(source='bingod.ini',
103 target='/etc/bingod.ini',
104 owner='bingo', perms=0400),
105 ],
106 },
107 {
108 'service': 'spadesd',
109 'data_ready': services.template(source='spadesd_run.j2',
110 target='/etc/sv/spadesd/run',
111 perms=0555),
112 'start': runit_start,
113 'stop': runit_stop,
114 },
115 ])
116 manager.manage()
117 """
118 self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
119 self._ready = None
120 self.services = OrderedDict()
121 for service in services or []:
122 service_name = service['service']
123 self.services[service_name] = service
124
125 def manage(self):
126 """
127 Handle the current hook by doing The Right Thing with the registered services.
128 """
129 hookenv._run_atstart()
130 try:
131 hook_name = hookenv.hook_name()
132 if hook_name == 'stop':
133 self.stop_services()
134 else:
135 self.reconfigure_services()
136 self.provide_data()
137 except SystemExit as x:
138 if x.code is None or x.code == 0:
139 hookenv._run_atexit()
140 hookenv._run_atexit()
141
142 def provide_data(self):
143 """
144 Set the relation data for each provider in the ``provided_data`` list.
145
146 A provider must have a `name` attribute, which indicates which relation
147 to set data on, and a `provide_data()` method, which returns a dict of
148 data to set.
149
150 The `provide_data()` method can optionally accept two parameters:
151
152 * ``remote_service`` The name of the remote service that the data will
153 be provided to. The `provide_data()` method will be called once
154 for each connected service (not unit). This allows the method to
155 tailor its data to the given service.
156 * ``service_ready`` Whether or not the service definition had all of
157 its requirements met, and thus the ``data_ready`` callbacks run.
158
159 Note that the ``provided_data`` methods are now called **after** the
160 ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
161 a chance to generate any data necessary for the providing to the remote
162 services.
163 """
164 for service_name, service in self.services.items():
165 service_ready = self.is_ready(service_name)
166 for provider in service.get('provided_data', []):
167 for relid in hookenv.relation_ids(provider.name):
168 units = hookenv.related_units(relid)
169 if not units:
170 continue
171 remote_service = units[0].split('/')[0]
172 argspec = getargspec(provider.provide_data)
173 if len(argspec.args) > 1:
174 data = provider.provide_data(remote_service, service_ready)
175 else:
176 data = provider.provide_data()
177 if data:
178 hookenv.relation_set(relid, data)
179
180 def reconfigure_services(self, *service_names):
181 """
182 Update all files for one or more registered services, and,
183 if ready, optionally restart them.
184
185 If no service names are given, reconfigures all registered services.
186 """
187 for service_name in service_names or self.services.keys():
188 if self.is_ready(service_name):
189 self.fire_event('data_ready', service_name)
190 self.fire_event('start', service_name, default=[
191 service_restart,
192 manage_ports])
193 self.save_ready(service_name)
194 else:
195 if self.was_ready(service_name):
196 self.fire_event('data_lost', service_name)
197 self.fire_event('stop', service_name, default=[
198 manage_ports,
199 service_stop])
200 self.save_lost(service_name)
201
202 def stop_services(self, *service_names):
203 """
204 Stop one or more registered services, by name.
205
206 If no service names are given, stops all registered services.
207 """
208 for service_name in service_names or self.services.keys():
209 self.fire_event('stop', service_name, default=[
210 manage_ports,
211 service_stop])
212
213 def get_service(self, service_name):
214 """
215 Given the name of a registered service, return its service definition.
216 """
217 service = self.services.get(service_name)
218 if not service:
219 raise KeyError('Service not registered: %s' % service_name)
220 return service
221
222 def fire_event(self, event_name, service_name, default=None):
223 """
224 Fire a data_ready, data_lost, start, or stop event on a given service.
225 """
226 service = self.get_service(service_name)
227 callbacks = service.get(event_name, default)
228 if not callbacks:
229 return
230 if not isinstance(callbacks, Iterable):
231 callbacks = [callbacks]
232 for callback in callbacks:
233 if isinstance(callback, ManagerCallback):
234 callback(self, service_name, event_name)
235 else:
236 callback(service_name)
237
238 def is_ready(self, service_name):
239 """
240 Determine if a registered service is ready, by checking its 'required_data'.
241
242 A 'required_data' item can be any mapping type, and is considered ready
243 if `bool(item)` evaluates as True.
244 """
245 service = self.get_service(service_name)
246 reqs = service.get('required_data', [])
247 return all(bool(req) for req in reqs)
248
249 def _load_ready_file(self):
250 if self._ready is not None:
251 return
252 if os.path.exists(self._ready_file):
253 with open(self._ready_file) as fp:
254 self._ready = set(json.load(fp))
255 else:
256 self._ready = set()
257
258 def _save_ready_file(self):
259 if self._ready is None:
260 return
261 with open(self._ready_file, 'w') as fp:
262 json.dump(list(self._ready), fp)
263
264 def save_ready(self, service_name):
265 """
266 Save an indicator that the given service is now data_ready.
267 """
268 self._load_ready_file()
269 self._ready.add(service_name)
270 self._save_ready_file()
271
272 def save_lost(self, service_name):
273 """
274 Save an indicator that the given service is no longer data_ready.
275 """
276 self._load_ready_file()
277 self._ready.discard(service_name)
278 self._save_ready_file()
279
280 def was_ready(self, service_name):
281 """
282 Determine if the given service was previously data_ready.
283 """
284 self._load_ready_file()
285 return service_name in self._ready
286
287
288class ManagerCallback(object):
289 """
290 Special case of a callback that takes the `ServiceManager` instance
291 in addition to the service name.
292
293 Subclasses should implement `__call__` which should accept three parameters:
294
295 * `manager` The `ServiceManager` instance
296 * `service_name` The name of the service it's being triggered for
297 * `event_name` The name of the event that this callback is handling
298 """
299 def __call__(self, manager, service_name, event_name):
300 raise NotImplementedError()
301
302
303class PortManagerCallback(ManagerCallback):
304 """
305 Callback class that will open or close ports, for use as either
306 a start or stop action.
307 """
308 def __call__(self, manager, service_name, event_name):
309 service = manager.get_service(service_name)
310 # turn this generator into a list,
311 # as we'll be going over it multiple times
312 new_ports = list(service.get('ports', []))
313 port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
314 if os.path.exists(port_file):
315 with open(port_file) as fp:
316 old_ports = fp.read().split(',')
317 for old_port in old_ports:
318 if bool(old_port) and not self.ports_contains(old_port, new_ports):
319 hookenv.close_port(old_port)
320 with open(port_file, 'w') as fp:
321 fp.write(','.join(str(port) for port in new_ports))
322 for port in new_ports:
323 # A port is either a number or 'ICMP'
324 protocol = 'TCP'
325 if str(port).upper() == 'ICMP':
326 protocol = 'ICMP'
327 if event_name == 'start':
328 hookenv.open_port(port, protocol)
329 elif event_name == 'stop':
330 hookenv.close_port(port, protocol)
331
332 def ports_contains(self, port, ports):
333 if not bool(port):
334 return False
335 if str(port).upper() != 'ICMP':
336 port = int(port)
337 return port in ports
338
339
340def service_stop(service_name):
341 """
342 Wrapper around host.service_stop to prevent spurious "unknown service"
343 messages in the logs.
344 """
345 if host.service_running(service_name):
346 host.service_stop(service_name)
347
348
349def service_restart(service_name):
350 """
351 Wrapper around host.service_restart to prevent spurious "unknown service"
352 messages in the logs.
353 """
354 if host.service_available(service_name):
355 if host.service_running(service_name):
356 host.service_restart(service_name)
357 else:
358 host.service_start(service_name)
359
360
361# Convenience aliases
362open_ports = close_ports = manage_ports = PortManagerCallback()
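For context, a minimal, illustrative sketch of driving the ServiceManager above from a charm's hook entry point; the service name, port and data contexts are invented for the example.

#!/usr/bin/env python
# Illustrative sketch only; service name, port and data contexts are examples.
from charmhelpers.core.services.base import ServiceManager


def main():
    manager = ServiceManager([
        {
            'service': 'squid',
            'ports': [3128],      # opened by the default 'start' callbacks
            'required_data': [],  # RelationContext/config contexts go here
        },
    ])
    # Dispatches on hook_name(): 'stop' stops registered services,
    # anything else reconfigures them and provides relation data.
    manager.manage()


if __name__ == '__main__':
    main()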
diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000..3e6e30d
--- /dev/null
+++ b/hooks/charmhelpers/core/services/helpers.py
@@ -0,0 +1,290 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import os
16import yaml
17
18from charmhelpers.core import hookenv
19from charmhelpers.core import host
20from charmhelpers.core import templating
21
22from charmhelpers.core.services.base import ManagerCallback
23
24
25__all__ = ['RelationContext', 'TemplateCallback',
26 'render_template', 'template']
27
28
29class RelationContext(dict):
30 """
31 Base class for a context generator that gets relation data from juju.
32
33 Subclasses must provide the attributes `name`, which is the name of the
34 interface of interest, `interface`, which is the type of the interface of
35 interest, and `required_keys`, which is the set of keys required for the
36 relation to be considered complete. The data for all interfaces matching
37 the `name` attribute that are complete will used to populate the dictionary
38 values (see `get_data`, below).
39
40 The generated context will be namespaced under the relation :attr:`name`,
41 to prevent potential naming conflicts.
42
43 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
44 :param list additional_required_keys: Extend the list of :attr:`required_keys`
45 """
46 name = None
47 interface = None
48
49 def __init__(self, name=None, additional_required_keys=None):
50 if not hasattr(self, 'required_keys'):
51 self.required_keys = []
52
53 if name is not None:
54 self.name = name
55 if additional_required_keys:
56 self.required_keys.extend(additional_required_keys)
57 self.get_data()
58
59 def __bool__(self):
60 """
61 Returns True if all of the required_keys are available.
62 """
63 return self.is_ready()
64
65 __nonzero__ = __bool__
66
67 def __repr__(self):
68 return super(RelationContext, self).__repr__()
69
70 def is_ready(self):
71 """
72 Returns True if all of the `required_keys` are available from any units.
73 """
74 ready = len(self.get(self.name, [])) > 0
75 if not ready:
76 hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
77 return ready
78
79 def _is_ready(self, unit_data):
80 """
81 Helper method that tests a set of relation data and returns True if
82 all of the `required_keys` are present.
83 """
84 return set(unit_data.keys()).issuperset(set(self.required_keys))
85
86 def get_data(self):
87 """
88 Retrieve the relation data for each unit involved in a relation and,
89 if complete, store it in a list under `self[self.name]`. This
90 is automatically called when the RelationContext is instantiated.
91
92 The units are sorted lexographically first by the service ID, then by
93 the unit ID. Thus, if an interface has two other services, 'db:1'
94 and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
95 and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
96 set of data, the relation data for the units will be stored in the
97 order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
98
99 If you only care about a single unit on the relation, you can just
100 access it as `{{ interface[0]['key'] }}`. However, if you can at all
101 support multiple units on a relation, you should iterate over the list,
102 like::
103
104 {% for unit in interface -%}
105 {{ unit['key'] }}{% if not loop.last %},{% endif %}
106 {%- endfor %}
107
108 Note that since all sets of relation data from all related services and
109 units are in a single list, if you need to know which service or unit a
110 set of data came from, you'll need to extend this class to preserve
111 that information.
112 """
113 if not hookenv.relation_ids(self.name):
114 return
115
116 ns = self.setdefault(self.name, [])
117 for rid in sorted(hookenv.relation_ids(self.name)):
118 for unit in sorted(hookenv.related_units(rid)):
119 reldata = hookenv.relation_get(rid=rid, unit=unit)
120 if self._is_ready(reldata):
121 ns.append(reldata)
122
123 def provide_data(self):
124 """
125 Return data to be relation_set for this interface.
126 """
127 return {}
128
129
130class MysqlRelation(RelationContext):
131 """
132 Relation context for the `mysql` interface.
133
134 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
135 :param list additional_required_keys: Extend the list of :attr:`required_keys`
136 """
137 name = 'db'
138 interface = 'mysql'
139
140 def __init__(self, *args, **kwargs):
141 self.required_keys = ['host', 'user', 'password', 'database']
142 RelationContext.__init__(self, *args, **kwargs)
143
144
145class HttpRelation(RelationContext):
146 """
147 Relation context for the `http` interface.
148
149 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
150 :param list additional_required_keys: Extend the list of :attr:`required_keys`
151 """
152 name = 'website'
153 interface = 'http'
154
155 def __init__(self, *args, **kwargs):
156 self.required_keys = ['host', 'port']
157 RelationContext.__init__(self, *args, **kwargs)
158
159 def provide_data(self):
160 return {
161 'host': hookenv.unit_get('private-address'),
162 'port': 80,
163 }
164
165
166class RequiredConfig(dict):
167 """
168 Data context that loads config options with one or more mandatory options.
169
170 Once the required options have been changed from their default values, all
171 config options will be available, namespaced under `config` to prevent
172 potential naming conflicts (for example, between a config option and a
173 relation property).
174
175 :param list *args: List of options that must be changed from their default values.
176 """
177
178 def __init__(self, *args):
179 self.required_options = args
180 self['config'] = hookenv.config()
181 with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
182 self.config = yaml.load(fp).get('options', {})
183
184 def __bool__(self):
185 for option in self.required_options:
186 if option not in self['config']:
187 return False
188 current_value = self['config'][option]
189 default_value = self.config[option].get('default')
190 if current_value == default_value:
191 return False
192 if current_value in (None, '') and default_value in (None, ''):
193 return False
194 return True
195
196 def __nonzero__(self):
197 return self.__bool__()
198
199
200class StoredContext(dict):
201 """
202 A data context that always returns the data that it was first created with.
203
204 This is useful to do a one-time generation of things like passwords, that
205 will thereafter use the same value that was originally generated, instead
206 of generating a new value each time it is run.
207 """
208 def __init__(self, file_name, config_data):
209 """
210 If the file exists, populate `self` with the data from the file.
211 Otherwise, populate with the given data and persist it to the file.
212 """
213 if os.path.exists(file_name):
214 self.update(self.read_context(file_name))
215 else:
216 self.store_context(file_name, config_data)
217 self.update(config_data)
218
219 def store_context(self, file_name, config_data):
220 if not os.path.isabs(file_name):
221 file_name = os.path.join(hookenv.charm_dir(), file_name)
222 with open(file_name, 'w') as file_stream:
223 os.fchmod(file_stream.fileno(), 0o600)
224 yaml.dump(config_data, file_stream)
225
226 def read_context(self, file_name):
227 if not os.path.isabs(file_name):
228 file_name = os.path.join(hookenv.charm_dir(), file_name)
229 with open(file_name, 'r') as file_stream:
230 data = yaml.load(file_stream)
231 if not data:
232 raise OSError("%s is empty" % file_name)
233 return data
234
235
236class TemplateCallback(ManagerCallback):
237 """
238 Callback class that will render a Jinja2 template, for use as a ready
239 action.
240
241 :param str source: The template source file, relative to
242 `$CHARM_DIR/templates`
243
244 :param str target: The target to write the rendered template to (or None)
245 :param str owner: The owner of the rendered file
246 :param str group: The group of the rendered file
247 :param int perms: The permissions of the rendered file
248 :param partial on_change_action: functools partial to be executed when
249 rendered file changes
250 :param jinja2 loader template_loader: A jinja2 template loader
251
252 :return str: The rendered template
253 """
254 def __init__(self, source, target,
255 owner='root', group='root', perms=0o444,
256 on_change_action=None, template_loader=None):
257 self.source = source
258 self.target = target
259 self.owner = owner
260 self.group = group
261 self.perms = perms
262 self.on_change_action = on_change_action
263 self.template_loader = template_loader
264
265 def __call__(self, manager, service_name, event_name):
266 pre_checksum = ''
267 if self.on_change_action and os.path.isfile(self.target):
268 pre_checksum = host.file_hash(self.target)
269 service = manager.get_service(service_name)
270 context = {'ctx': {}}
271 for ctx in service.get('required_data', []):
272 context.update(ctx)
273 context['ctx'].update(ctx)
274
275 result = templating.render(self.source, self.target, context,
276 self.owner, self.group, self.perms,
277 template_loader=self.template_loader)
278 if self.on_change_action:
279 if pre_checksum == host.file_hash(self.target):
280 hookenv.log(
281 'No change detected: {}'.format(self.target),
282 hookenv.DEBUG)
283 else:
284 self.on_change_action()
285
286 return result
287
288
289# Convenience aliases for templates
290render_template = template = TemplateCallback
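To illustrate how the pieces fit, a hedged sketch of a custom RelationContext plus a template callback, modelled on the HttpRelation class above; the relation name, keys and file names are invented.

# Illustrative sketch only; relation name, keys and file names are examples.
from charmhelpers.core.services import helpers
from charmhelpers.core.services.base import ServiceManager


class CacheRelation(helpers.RelationContext):
    name = 'cache'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        self.required_keys = ['host', 'port']
        helpers.RelationContext.__init__(self, *args, **kwargs)


manager = ServiceManager([{
    'service': 'squid',
    'required_data': [CacheRelation()],
    # Render templates/squid.conf.j2 once the relation data is complete;
    # the default 'start' callbacks then restart the service.
    'data_ready': [helpers.render_template(source='squid.conf.j2',
                                           target='/etc/squid/squid.conf')],
}])
manager.manage()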
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
new file mode 100644
index 0000000..e8df045
--- /dev/null
+++ b/hooks/charmhelpers/core/strutils.py
@@ -0,0 +1,129 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17
18import six
19import re
20
21
22def bool_from_string(value):
23 """Interpret string value as boolean.
24
25 Returns True if value translates to True otherwise False.
26 """
27 if isinstance(value, six.string_types):
28 value = six.text_type(value)
29 else:
30 msg = "Unable to interpret non-string value '%s' as boolean" % (value)
31 raise ValueError(msg)
32
33 value = value.strip().lower()
34
35 if value in ['y', 'yes', 'true', 't', 'on']:
36 return True
37 elif value in ['n', 'no', 'false', 'f', 'off']:
38 return False
39
40 msg = "Unable to interpret string value '%s' as boolean" % (value)
41 raise ValueError(msg)
42
43
44def bytes_from_string(value):
45 """Interpret human readable string value as bytes.
46
47 Returns int
48 """
49 BYTE_POWER = {
50 'K': 1,
51 'KB': 1,
52 'M': 2,
53 'MB': 2,
54 'G': 3,
55 'GB': 3,
56 'T': 4,
57 'TB': 4,
58 'P': 5,
59 'PB': 5,
60 }
61 if isinstance(value, six.string_types):
62 value = six.text_type(value)
63 else:
64 msg = "Unable to interpret non-string value '%s' as bytes" % (value)
65 raise ValueError(msg)
66 matches = re.match("([0-9]+)([a-zA-Z]+)", value)
67 if matches:
68 size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
69 else:
70 # Assume that value passed in is bytes
71 try:
72 size = int(value)
73 except ValueError:
74 msg = "Unable to interpret string value '%s' as bytes" % (value)
75 raise ValueError(msg)
76 return size
77
78
79class BasicStringComparator(object):
80 """Provides a class that will compare strings from an iterator type object.
81 Used to provide > and < comparisons on strings that may not necessarily be
82 alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
83 z-wrap.
84 """
85
86 _list = None
87
88 def __init__(self, item):
89 if self._list is None:
90 raise Exception("Must define the _list in the class definition!")
91 try:
92 self.index = self._list.index(item)
93 except Exception:
94 raise KeyError("Item '{}' is not in list '{}'"
95 .format(item, self._list))
96
97 def __eq__(self, other):
98 assert isinstance(other, str) or isinstance(other, self.__class__)
99 return self.index == self._list.index(other)
100
101 def __ne__(self, other):
102 return not self.__eq__(other)
103
104 def __lt__(self, other):
105 assert isinstance(other, str) or isinstance(other, self.__class__)
106 return self.index < self._list.index(other)
107
108 def __ge__(self, other):
109 return not self.__lt__(other)
110
111 def __gt__(self, other):
112 assert isinstance(other, str) or isinstance(other, self.__class__)
113 return self.index > self._list.index(other)
114
115 def __le__(self, other):
116 return not self.__gt__(other)
117
118 def __str__(self):
119 """Always give back the item at the index so it can be used in
120 comparisons like:
121
122 s_mitaka = CompareOpenStack('mitaka')
123 s_newton = CompareOpenstack('newton')
124
125 assert s_newton > s_mitaka
126
127 @returns: <string>
128 """
129 return self._list[self.index]
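A quick, illustrative sketch of the string helpers above:

# Illustrative sketch only.
from charmhelpers.core.strutils import bool_from_string, bytes_from_string

bool_from_string('yes')    # True
bool_from_string('off')    # False
bytes_from_string('2MB')   # 2 * 1024 ** 2 == 2097152
bytes_from_string('1024')  # bare numbers are treated as bytes: 1024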
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000..1f188d8
--- /dev/null
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -0,0 +1,58 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17
18import yaml
19
20from subprocess import check_call
21
22from charmhelpers.core.hookenv import (
23 log,
24 DEBUG,
25 ERROR,
26)
27
28__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
29
30
31def create(sysctl_dict, sysctl_file):
32 """Creates a sysctl.conf file from a YAML associative array
33
34 :param sysctl_dict: a dict or YAML-formatted string of sysctl
35 options eg "{ 'kernel.max_pid': 1337 }"
36 :type sysctl_dict: str
37 :param sysctl_file: path to the sysctl file to be saved
38 :type sysctl_file: str or unicode
39 :returns: None
40 """
41 if type(sysctl_dict) is not dict:
42 try:
43 sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
44 except yaml.YAMLError:
45 log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
46 level=ERROR)
47 return
48 else:
49 sysctl_dict_parsed = sysctl_dict
50
51 with open(sysctl_file, "w") as fd:
52 for key, value in sysctl_dict_parsed.items():
53 fd.write("{}={}\n".format(key, value))
54
55 log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
56 level=DEBUG)
57
58 check_call(["sysctl", "-p", sysctl_file])
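For example, create() above accepts either a dict or a YAML string; the sysctl key and target file here are illustrative.

# Illustrative sketch only; the sysctl key and target file are examples.
# Writes the file and then applies it with `sysctl -p <file>`.
from charmhelpers.core.sysctl import create

create({'net.ipv4.ip_forward': 1}, '/etc/sysctl.d/50-example.conf')
# or equivalently, as a YAML string:
create("{net.ipv4.ip_forward: 1}", '/etc/sysctl.d/50-example.conf')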
diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py
new file mode 100644
index 0000000..9014015
--- /dev/null
+++ b/hooks/charmhelpers/core/templating.py
@@ -0,0 +1,93 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import os
16import sys
17
18from charmhelpers.core import host
19from charmhelpers.core import hookenv
20
21
22def render(source, target, context, owner='root', group='root',
23 perms=0o444, templates_dir=None, encoding='UTF-8',
24 template_loader=None, config_template=None):
25 """
26 Render a template.
27
28 The `source` path, if not absolute, is relative to the `templates_dir`.
29
30 The `target` path should be absolute. It can also be `None`, in which
31 case no file will be written.
32
33 The context should be a dict containing the values to be replaced in the
34 template.
35
36 config_template may be provided to render from a provided template instead
37 of loading from a file.
38
39 The `owner`, `group`, and `perms` options will be passed to `write_file`.
40
41 If omitted, `templates_dir` defaults to the `templates` folder in the charm.
42
43 The rendered template will be written to the file as well as being returned
44 as a string.
45
46 Note: Using this requires python-jinja2 or python3-jinja2; if it is not
47 installed, calling this will attempt to use charmhelpers.fetch.apt_install
48 to install it.
49 """
50 try:
51 from jinja2 import FileSystemLoader, Environment, exceptions
52 except ImportError:
53 try:
54 from charmhelpers.fetch import apt_install
55 except ImportError:
56 hookenv.log('Could not import jinja2, and could not import '
57 'charmhelpers.fetch to install it',
58 level=hookenv.ERROR)
59 raise
60 if sys.version_info.major == 2:
61 apt_install('python-jinja2', fatal=True)
62 else:
63 apt_install('python3-jinja2', fatal=True)
64 from jinja2 import FileSystemLoader, Environment, exceptions
65
66 if template_loader:
67 template_env = Environment(loader=template_loader)
68 else:
69 if templates_dir is None:
70 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
71 template_env = Environment(loader=FileSystemLoader(templates_dir))
72
73 # load from a string if provided explicitly
74 if config_template is not None:
75 template = template_env.from_string(config_template)
76 else:
77 try:
78 source = source
79 template = template_env.get_template(source)
80 except exceptions.TemplateNotFound as e:
81 hookenv.log('Could not load template %s from %s.' %
82 (source, templates_dir),
83 level=hookenv.ERROR)
84 raise e
85 content = template.render(context)
86 if target is not None:
87 target_dir = os.path.dirname(target)
88 if not os.path.exists(target_dir):
89 # This is a terrible default directory permission, as the file
90 # or its siblings will often contain secrets.
91 host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
92 host.write_file(target, content.encode(encoding), owner, group, perms)
93 return content
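And a short, illustrative sketch of render(); the template name, target and context are invented for the example.

# Illustrative sketch only; template, target and context are examples.
# Renders $CHARM_DIR/templates/squid.conf.j2 with the given context and
# writes the result to /etc/squid/squid.conf as root:root, mode 0444.
from charmhelpers.core.templating import render

render(
    source='squid.conf.j2',
    target='/etc/squid/squid.conf',
    context={'http_port': 3128},
    owner='root',
    group='root',
    perms=0o444,
)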
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000..ab55432
--- /dev/null
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -0,0 +1,525 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3#
4# Copyright 2014-2015 Canonical Limited.
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17#
18# Authors:
19# Kapil Thangavelu <kapil.foss@gmail.com>
20#
21"""
22Intro
23-----
24
25A simple way to store state in units. This provides a key value
26storage with support for versioned, transactional operation,
27and can calculate deltas from previous values to simplify unit logic
28when processing changes.
29
30
31Hook Integration
32----------------
33
34There are several extant frameworks for hook execution, including
35
36 - charmhelpers.core.hookenv.Hooks
37 - charmhelpers.core.services.ServiceManager
38
39The storage classes are framework agnostic, one simple integration is
40via the HookData contextmanager. It will record the current hook
41execution environment (including relation data, config data, etc.),
42setup a transaction and allow easy access to the changes from
43previously seen values. One consequence of the integration is the
44reservation of particular keys ('rels', 'unit', 'env', 'config',
45'charm_revisions') for their respective values.
46
47Here's a fully worked integration example using hookenv.Hooks::
48
49 from charmhelper.core import hookenv, unitdata
50
51 hook_data = unitdata.HookData()
52 db = unitdata.kv()
53 hooks = hookenv.Hooks()
54
55 @hooks.hook
56 def config_changed():
57 # Print all changes to configuration from previously seen
58 # values.
59 for changed, (prev, cur) in hook_data.conf.items():
60 print('config changed', changed,
61 'previous value', prev,
62 'current value', cur)
63
64 # Get some unit-specific bookkeeping
65 if not db.get('pkg_key'):
66 key = urllib.urlopen('https://example.com/pkg_key').read()
67 db.set('pkg_key', key)
68
69 # Directly access all charm config as a mapping.
70 conf = db.getrange('config', True)
71
72 # Directly access all relation data as a mapping
73 rels = db.getrange('rels', True)
74
75 if __name__ == '__main__':
76 with hook_data():
77 hooks.execute()
78
79
80A more basic integration is via the hook_scope context manager, which simply
81manages transaction scope (and records the hook name and timestamp)::
82
83 >>> from unitdata import kv
84 >>> db = kv()
85 >>> with db.hook_scope('install'):
86 ... # do work, in transactional scope.
87 ... db.set('x', 1)
88 >>> db.get('x')
89 1
90
91
92Usage
93-----
94
95Values are automatically JSON de/serialized to preserve basic typing
96and complex data structures (dicts, lists, ints, booleans, etc.).
97
98Individual values can be manipulated via get/set::
99
100 >>> kv.set('y', True)
101 >>> kv.get('y')
102 True
103
104 # We can set complex values (dicts, lists) as a single key.
105 >>> kv.set('config', {'a': 1, 'b': True})
106
107 # Also supports returning dictionaries as a record which
108 # provides attribute access.
109 >>> config = kv.get('config', record=True)
110 >>> config.b
111 True
112
113
114Groups of keys can be manipulated with update/getrange::
115
116 >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
117 >>> kv.getrange('gui.', strip=True)
118 {'z': 1, 'y': 2}
119
120When updating values, it's very helpful to understand which values
121have actually changed and how they have changed. The storage
122provides a delta method for this::
123
124 >>> data = {'debug': True, 'option': 2}
125 >>> delta = kv.delta(data, 'config.')
126 >>> delta.debug.previous
127 None
128 >>> delta.debug.current
129 True
130 >>> delta
131 {'debug': (None, True), 'option': (None, 2)}
132
133Note that the delta method does not persist the actual change; it needs to
134be explicitly saved via the 'update' method::
135
136 >>> kv.update(data, 'config.')
137
138Values modified in the context of a hook scope retain historical values
139associated with the hook name::
140
141 >>> with db.hook_scope('config-changed'):
142 ... db.set('x', 42)
143 >>> db.gethistory('x')
144 [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
145 (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
146
147"""
148
149import collections
150import contextlib
151import datetime
152import itertools
153import json
154import os
155import pprint
156import sqlite3
157import sys
158
159__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
160
161
162class Storage(object):
163 """Simple key value database for local unit state within charms.
164
165 Modifications are not persisted unless :meth:`flush` is called.
166
167 To support dicts, lists, integers, floats, and booleans, values
168 are automatically JSON encoded/decoded.
169
170 Note: to facilitate unit testing, ':memory:' can be passed as the
171 path parameter, which causes sqlite3 to build the db in memory only.
172 This should only be used for testing purposes.
173 """
174 def __init__(self, path=None):
175 self.db_path = path
176 if path is None:
177 if 'UNIT_STATE_DB' in os.environ:
178 self.db_path = os.environ['UNIT_STATE_DB']
179 else:
180 self.db_path = os.path.join(
181 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
182 if self.db_path != ':memory:':
183 with open(self.db_path, 'a') as f:
184 os.fchmod(f.fileno(), 0o600)
185 self.conn = sqlite3.connect('%s' % self.db_path)
186 self.cursor = self.conn.cursor()
187 self.revision = None
188 self._closed = False
189 self._init()
190
191 def close(self):
192 if self._closed:
193 return
194 self.flush(False)
195 self.cursor.close()
196 self.conn.close()
197 self._closed = True
198
199 def get(self, key, default=None, record=False):
200 self.cursor.execute('select data from kv where key=?', [key])
201 result = self.cursor.fetchone()
202 if not result:
203 return default
204 if record:
205 return Record(json.loads(result[0]))
206 return json.loads(result[0])
207
208 def getrange(self, key_prefix, strip=False):
209 """
210 Get a range of keys starting with a common prefix as a mapping of
211 keys to values.
212
213 :param str key_prefix: Common prefix among all keys
214 :param bool strip: Optionally strip the common prefix from the key
215 names in the returned dict
216 :return dict: A (possibly empty) dict of key-value mappings
217 """
218 self.cursor.execute("select key, data from kv where key like ?",
219 ['%s%%' % key_prefix])
220 result = self.cursor.fetchall()
221
222 if not result:
223 return {}
224 if not strip:
225 key_prefix = ''
226 return dict([
227 (k[len(key_prefix):], json.loads(v)) for k, v in result])
228
229 def update(self, mapping, prefix=""):
230 """
231 Set the values of multiple keys at once.
232
233 :param dict mapping: Mapping of keys to values
234 :param str prefix: Optional prefix to apply to all keys in `mapping`
235 before setting
236 """
237 for k, v in mapping.items():
238 self.set("%s%s" % (prefix, k), v)
239
240 def unset(self, key):
241 """
242 Remove a key from the database entirely.
243 """
244 self.cursor.execute('delete from kv where key=?', [key])
245 if self.revision and self.cursor.rowcount:
246 self.cursor.execute(
247 'insert into kv_revisions values (?, ?, ?)',
248 [key, self.revision, json.dumps('DELETED')])
249
250 def unsetrange(self, keys=None, prefix=""):
251 """
252 Remove a range of keys starting with a common prefix from the database
253 entirely.
254
255 :param list keys: List of keys to remove.
256 :param str prefix: Optional prefix to apply to all keys in ``keys``
257 before removing.
258 """
259 if keys is not None:
260 keys = ['%s%s' % (prefix, key) for key in keys]
261 self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
262 if self.revision and self.cursor.rowcount:
263 self.cursor.execute(
264 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
265 list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
266 else:
267 self.cursor.execute('delete from kv where key like ?',
268 ['%s%%' % prefix])
269 if self.revision and self.cursor.rowcount:
270 self.cursor.execute(
271 'insert into kv_revisions values (?, ?, ?)',
272 ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
273
274 def set(self, key, value):
275 """
276 Set a value in the database.
277
278 :param str key: Key to set the value for
279 :param value: Any JSON-serializable value to be set
280 """
281 serialized = json.dumps(value)
282
283 self.cursor.execute('select data from kv where key=?', [key])
284 exists = self.cursor.fetchone()
285
286 # Skip mutations to the same value
287 if exists:
288 if exists[0] == serialized:
289 return value
290
291 if not exists:
292 self.cursor.execute(
293 'insert into kv (key, data) values (?, ?)',
294 (key, serialized))
295 else:
296 self.cursor.execute('''
297 update kv
298 set data = ?
299 where key = ?''', [serialized, key])
300
301 # Save
302 if not self.revision:
303 return value
304
305 self.cursor.execute(
306 'select 1 from kv_revisions where key=? and revision=?',
307 [key, self.revision])
308 exists = self.cursor.fetchone()
The diff has been truncated for viewing.
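
As a usage note for the new unitdata module above, a minimal sketch of how a hook can use the key/value store to detect changes between runs; the key name and values are illustrative assumptions, not code from this branch:

    from charmhelpers.core import unitdata

    db = unitdata.kv()  # Storage instance backed by .unit-state.db in the charm dir

    # Compare the settings we are about to apply against what was stored on
    # the previous hook run, and only act when something actually changed.
    desired = {'http_port': 3128, 'cache_mem': 256}  # illustrative values
    previous = db.get('squid.rendered', default={})
    if desired != previous:
        # ... re-render the config and reload the service here ...
        db.set('squid.rendered', desired)
        db.flush()  # modifications only persist once flush() is called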
