Merge lp:~1chb1n/charms/trusty/rabbitmq-server/next-ch-sync-mitaka-uca into lp:~openstack-charmers-archive/charms/trusty/rabbitmq-server/next
- Trusty Tahr (14.04)
- next-ch-sync-mitaka-uca
- Merge into next
Proposed by
Ryan Beisner
Status: | Merged |
---|---|
Merged at revision: | 127 |
Proposed branch: | lp:~1chb1n/charms/trusty/rabbitmq-server/next-ch-sync-mitaka-uca |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/rabbitmq-server/next |
Diff against target: |
2209 lines (+990/-261) 22 files modified
hooks/charmhelpers/cli/__init__.py (+3/-3) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52/-14) hooks/charmhelpers/contrib/network/ip.py (+21/-19) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+75/-11) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+25/-3) hooks/charmhelpers/contrib/openstack/context.py (+32/-2) hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+7/-5) hooks/charmhelpers/contrib/openstack/neutron.py (+2/-2) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+19/-11) hooks/charmhelpers/contrib/openstack/utils.py (+59/-26) hooks/charmhelpers/contrib/python/packages.py (+13/-4) hooks/charmhelpers/contrib/storage/linux/ceph.py (+441/-59) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/core/hookenv.py (+54/-6) hooks/charmhelpers/core/host.py (+82/-21) hooks/charmhelpers/core/services/helpers.py (+14/-5) hooks/charmhelpers/core/templating.py (+21/-8) hooks/charmhelpers/fetch/__init__.py (+10/-2) hooks/charmhelpers/fetch/archiveurl.py (+1/-1) hooks/charmhelpers/fetch/bzrurl.py (+22/-32) hooks/charmhelpers/fetch/giturl.py (+19/-24) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+8/-3) |
To merge this branch: | bzr merge lp:~1chb1n/charms/trusty/rabbitmq-server/next-ch-sync-mitaka-uca |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+282210@code.launchpad.net |
Commit message
Description of the change
Sync charm-helpers for Mitaka cloud archive capability.
To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #15962 rabbitmq-
UNIT OK: passed
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #8697 rabbitmq-
AMULET OK: passed
Build: http://
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/cli/__init__.py' |
2 | --- hooks/charmhelpers/cli/__init__.py 2015-08-19 13:49:53 +0000 |
3 | +++ hooks/charmhelpers/cli/__init__.py 2016-01-11 18:37:19 +0000 |
4 | @@ -20,7 +20,7 @@ |
5 | |
6 | from six.moves import zip |
7 | |
8 | -from charmhelpers.core import unitdata |
9 | +import charmhelpers.core.unitdata |
10 | |
11 | |
12 | class OutputFormatter(object): |
13 | @@ -163,8 +163,8 @@ |
14 | if getattr(arguments.func, '_cli_no_output', False): |
15 | output = '' |
16 | self.formatter.format_output(output, arguments.format) |
17 | - if unitdata._KV: |
18 | - unitdata._KV.flush() |
19 | + if charmhelpers.core.unitdata._KV: |
20 | + charmhelpers.core.unitdata._KV.flush() |
21 | |
22 | |
23 | cmdline = CommandLine() |
24 | |
25 | === modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' |
26 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-05-11 08:03:57 +0000 |
27 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-01-11 18:37:19 +0000 |
28 | @@ -148,6 +148,13 @@ |
29 | self.description = description |
30 | self.check_cmd = self._locate_cmd(check_cmd) |
31 | |
32 | + def _get_check_filename(self): |
33 | + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) |
34 | + |
35 | + def _get_service_filename(self, hostname): |
36 | + return os.path.join(NRPE.nagios_exportdir, |
37 | + 'service__{}_{}.cfg'.format(hostname, self.command)) |
38 | + |
39 | def _locate_cmd(self, check_cmd): |
40 | search_path = ( |
41 | '/usr/lib/nagios/plugins', |
42 | @@ -163,9 +170,21 @@ |
43 | log('Check command not found: {}'.format(parts[0])) |
44 | return '' |
45 | |
46 | + def _remove_service_files(self): |
47 | + if not os.path.exists(NRPE.nagios_exportdir): |
48 | + return |
49 | + for f in os.listdir(NRPE.nagios_exportdir): |
50 | + if f.endswith('_{}.cfg'.format(self.command)): |
51 | + os.remove(os.path.join(NRPE.nagios_exportdir, f)) |
52 | + |
53 | + def remove(self, hostname): |
54 | + nrpe_check_file = self._get_check_filename() |
55 | + if os.path.exists(nrpe_check_file): |
56 | + os.remove(nrpe_check_file) |
57 | + self._remove_service_files() |
58 | + |
59 | def write(self, nagios_context, hostname, nagios_servicegroups): |
60 | - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( |
61 | - self.command) |
62 | + nrpe_check_file = self._get_check_filename() |
63 | with open(nrpe_check_file, 'w') as nrpe_check_config: |
64 | nrpe_check_config.write("# check {}\n".format(self.shortname)) |
65 | nrpe_check_config.write("command[{}]={}\n".format( |
66 | @@ -180,9 +199,7 @@ |
67 | |
68 | def write_service_config(self, nagios_context, hostname, |
69 | nagios_servicegroups): |
70 | - for f in os.listdir(NRPE.nagios_exportdir): |
71 | - if re.search('.*{}.cfg'.format(self.command), f): |
72 | - os.remove(os.path.join(NRPE.nagios_exportdir, f)) |
73 | + self._remove_service_files() |
74 | |
75 | templ_vars = { |
76 | 'nagios_hostname': hostname, |
77 | @@ -192,8 +209,7 @@ |
78 | 'command': self.command, |
79 | } |
80 | nrpe_service_text = Check.service_template.format(**templ_vars) |
81 | - nrpe_service_file = '{}/service__{}_{}.cfg'.format( |
82 | - NRPE.nagios_exportdir, hostname, self.command) |
83 | + nrpe_service_file = self._get_service_filename(hostname) |
84 | with open(nrpe_service_file, 'w') as nrpe_service_config: |
85 | nrpe_service_config.write(str(nrpe_service_text)) |
86 | |
87 | @@ -218,12 +234,32 @@ |
88 | if hostname: |
89 | self.hostname = hostname |
90 | else: |
91 | - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) |
92 | + nagios_hostname = get_nagios_hostname() |
93 | + if nagios_hostname: |
94 | + self.hostname = nagios_hostname |
95 | + else: |
96 | + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) |
97 | self.checks = [] |
98 | |
99 | def add_check(self, *args, **kwargs): |
100 | self.checks.append(Check(*args, **kwargs)) |
101 | |
102 | + def remove_check(self, *args, **kwargs): |
103 | + if kwargs.get('shortname') is None: |
104 | + raise ValueError('shortname of check must be specified') |
105 | + |
106 | + # Use sensible defaults if they're not specified - these are not |
107 | + # actually used during removal, but they're required for constructing |
108 | + # the Check object; check_disk is chosen because it's part of the |
109 | + # nagios-plugins-basic package. |
110 | + if kwargs.get('check_cmd') is None: |
111 | + kwargs['check_cmd'] = 'check_disk' |
112 | + if kwargs.get('description') is None: |
113 | + kwargs['description'] = '' |
114 | + |
115 | + check = Check(*args, **kwargs) |
116 | + check.remove(self.hostname) |
117 | + |
118 | def write(self): |
119 | try: |
120 | nagios_uid = pwd.getpwnam('nagios').pw_uid |
121 | @@ -260,7 +296,7 @@ |
122 | :param str relation_name: Name of relation nrpe sub joined to |
123 | """ |
124 | for rel in relations_of_type(relation_name): |
125 | - if 'nagios_hostname' in rel: |
126 | + if 'nagios_host_context' in rel: |
127 | return rel['nagios_host_context'] |
128 | |
129 | |
130 | @@ -301,11 +337,13 @@ |
131 | upstart_init = '/etc/init/%s.conf' % svc |
132 | sysv_init = '/etc/init.d/%s' % svc |
133 | if os.path.exists(upstart_init): |
134 | - nrpe.add_check( |
135 | - shortname=svc, |
136 | - description='process check {%s}' % unit_name, |
137 | - check_cmd='check_upstart_job %s' % svc |
138 | - ) |
139 | + # Don't add a check for these services from neutron-gateway |
140 | + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: |
141 | + nrpe.add_check( |
142 | + shortname=svc, |
143 | + description='process check {%s}' % unit_name, |
144 | + check_cmd='check_upstart_job %s' % svc |
145 | + ) |
146 | elif os.path.exists(sysv_init): |
147 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc |
148 | cron_file = ('*/5 * * * * root ' |
149 | |
150 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' |
151 | --- hooks/charmhelpers/contrib/network/ip.py 2015-10-20 20:38:17 +0000 |
152 | +++ hooks/charmhelpers/contrib/network/ip.py 2016-01-11 18:37:19 +0000 |
153 | @@ -53,7 +53,7 @@ |
154 | |
155 | |
156 | def no_ip_found_error_out(network): |
157 | - errmsg = ("No IP address found in network: %s" % network) |
158 | + errmsg = ("No IP address found in network(s): %s" % network) |
159 | raise ValueError(errmsg) |
160 | |
161 | |
162 | @@ -61,7 +61,7 @@ |
163 | """Get an IPv4 or IPv6 address within the network from the host. |
164 | |
165 | :param network (str): CIDR presentation format. For example, |
166 | - '192.168.1.0/24'. |
167 | + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
168 | :param fallback (str): If no address is found, return fallback. |
169 | :param fatal (boolean): If no address is found, fallback is not |
170 | set and fatal is True then exit(1). |
171 | @@ -75,24 +75,26 @@ |
172 | else: |
173 | return None |
174 | |
175 | - _validate_cidr(network) |
176 | - network = netaddr.IPNetwork(network) |
177 | - for iface in netifaces.interfaces(): |
178 | - addresses = netifaces.ifaddresses(iface) |
179 | - if network.version == 4 and netifaces.AF_INET in addresses: |
180 | - addr = addresses[netifaces.AF_INET][0]['addr'] |
181 | - netmask = addresses[netifaces.AF_INET][0]['netmask'] |
182 | - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
183 | - if cidr in network: |
184 | - return str(cidr.ip) |
185 | + networks = network.split() or [network] |
186 | + for network in networks: |
187 | + _validate_cidr(network) |
188 | + network = netaddr.IPNetwork(network) |
189 | + for iface in netifaces.interfaces(): |
190 | + addresses = netifaces.ifaddresses(iface) |
191 | + if network.version == 4 and netifaces.AF_INET in addresses: |
192 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
193 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
194 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
195 | + if cidr in network: |
196 | + return str(cidr.ip) |
197 | |
198 | - if network.version == 6 and netifaces.AF_INET6 in addresses: |
199 | - for addr in addresses[netifaces.AF_INET6]: |
200 | - if not addr['addr'].startswith('fe80'): |
201 | - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
202 | - addr['netmask'])) |
203 | - if cidr in network: |
204 | - return str(cidr.ip) |
205 | + if network.version == 6 and netifaces.AF_INET6 in addresses: |
206 | + for addr in addresses[netifaces.AF_INET6]: |
207 | + if not addr['addr'].startswith('fe80'): |
208 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
209 | + addr['netmask'])) |
210 | + if cidr in network: |
211 | + return str(cidr.ip) |
212 | |
213 | if fallback is not None: |
214 | return fallback |
215 | |
216 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
217 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-20 20:38:17 +0000 |
218 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-11 18:37:19 +0000 |
219 | @@ -14,13 +14,18 @@ |
220 | # You should have received a copy of the GNU Lesser General Public License |
221 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
222 | |
223 | +import logging |
224 | import re |
225 | +import sys |
226 | import six |
227 | from collections import OrderedDict |
228 | from charmhelpers.contrib.amulet.deployment import ( |
229 | AmuletDeployment |
230 | ) |
231 | |
232 | +DEBUG = logging.DEBUG |
233 | +ERROR = logging.ERROR |
234 | + |
235 | |
236 | class OpenStackAmuletDeployment(AmuletDeployment): |
237 | """OpenStack amulet deployment. |
238 | @@ -29,9 +34,12 @@ |
239 | that is specifically for use by OpenStack charms. |
240 | """ |
241 | |
242 | - def __init__(self, series=None, openstack=None, source=None, stable=True): |
243 | + def __init__(self, series=None, openstack=None, source=None, |
244 | + stable=True, log_level=DEBUG): |
245 | """Initialize the deployment environment.""" |
246 | super(OpenStackAmuletDeployment, self).__init__(series) |
247 | + self.log = self.get_logger(level=log_level) |
248 | + self.log.info('OpenStackAmuletDeployment: init') |
249 | self.openstack = openstack |
250 | self.source = source |
251 | self.stable = stable |
252 | @@ -39,6 +47,22 @@ |
253 | # out. |
254 | self.current_next = "trusty" |
255 | |
256 | + def get_logger(self, name="deployment-logger", level=logging.DEBUG): |
257 | + """Get a logger object that will log to stdout.""" |
258 | + log = logging |
259 | + logger = log.getLogger(name) |
260 | + fmt = log.Formatter("%(asctime)s %(funcName)s " |
261 | + "%(levelname)s: %(message)s") |
262 | + |
263 | + handler = log.StreamHandler(stream=sys.stdout) |
264 | + handler.setLevel(level) |
265 | + handler.setFormatter(fmt) |
266 | + |
267 | + logger.addHandler(handler) |
268 | + logger.setLevel(level) |
269 | + |
270 | + return logger |
271 | + |
272 | def _determine_branch_locations(self, other_services): |
273 | """Determine the branch locations for the other services. |
274 | |
275 | @@ -46,6 +70,8 @@ |
276 | stable or next (dev) branch, and based on this, use the corresponding |
277 | stable or next branches for the other_services.""" |
278 | |
279 | + self.log.info('OpenStackAmuletDeployment: determine branch locations') |
280 | + |
281 | # Charms outside the lp:~openstack-charmers namespace |
282 | base_charms = ['mysql', 'mongodb', 'nrpe'] |
283 | |
284 | @@ -83,6 +109,8 @@ |
285 | |
286 | def _add_services(self, this_service, other_services): |
287 | """Add services to the deployment and set openstack-origin/source.""" |
288 | + self.log.info('OpenStackAmuletDeployment: adding services') |
289 | + |
290 | other_services = self._determine_branch_locations(other_services) |
291 | |
292 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
293 | @@ -96,7 +124,8 @@ |
294 | 'ceph-osd', 'ceph-radosgw'] |
295 | |
296 | # Charms which can not use openstack-origin, ie. many subordinates |
297 | - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
298 | + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
299 | + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] |
300 | |
301 | if self.openstack: |
302 | for svc in services: |
303 | @@ -112,11 +141,12 @@ |
304 | |
305 | def _configure_services(self, configs): |
306 | """Configure all of the services.""" |
307 | + self.log.info('OpenStackAmuletDeployment: configure services') |
308 | for service, config in six.iteritems(configs): |
309 | self.d.configure(service, config) |
310 | |
311 | def _auto_wait_for_status(self, message=None, exclude_services=None, |
312 | - timeout=1800): |
313 | + include_only=None, timeout=1800): |
314 | """Wait for all units to have a specific extended status, except |
315 | for any defined as excluded. Unless specified via message, any |
316 | status containing any case of 'ready' will be considered a match. |
317 | @@ -127,7 +157,7 @@ |
318 | message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) |
319 | |
320 | Wait for all units to reach this status (exact match): |
321 | - message = 'Unit is ready' |
322 | + message = re.compile('^Unit is ready and clustered$') |
323 | |
324 | Wait for all units to reach any one of these (exact match): |
325 | message = re.compile('Unit is ready|OK|Ready') |
326 | @@ -139,20 +169,50 @@ |
327 | https://github.com/juju/amulet/blob/master/amulet/sentry.py |
328 | |
329 | :param message: Expected status match |
330 | - :param exclude_services: List of juju service names to ignore |
331 | + :param exclude_services: List of juju service names to ignore, |
332 | + not to be used in conjunction with include_only. |
333 | + :param include_only: List of juju service names to exclusively check, |
334 | + not to be used in conjunction with exclude_services. |
335 | :param timeout: Maximum time in seconds to wait for status match |
336 | :returns: None. Raises if timeout is hit. |
337 | """ |
338 | - |
339 | - if not message: |
340 | + self.log.info('Waiting for extended status on units...') |
341 | + |
342 | + all_services = self.d.services.keys() |
343 | + |
344 | + if exclude_services and include_only: |
345 | + raise ValueError('exclude_services can not be used ' |
346 | + 'with include_only') |
347 | + |
348 | + if message: |
349 | + if isinstance(message, re._pattern_type): |
350 | + match = message.pattern |
351 | + else: |
352 | + match = message |
353 | + |
354 | + self.log.debug('Custom extended status wait match: ' |
355 | + '{}'.format(match)) |
356 | + else: |
357 | + self.log.debug('Default extended status wait match: contains ' |
358 | + 'READY (case-insensitive)') |
359 | message = re.compile('.*ready.*', re.IGNORECASE) |
360 | |
361 | - if not exclude_services: |
362 | + if exclude_services: |
363 | + self.log.debug('Excluding services from extended status match: ' |
364 | + '{}'.format(exclude_services)) |
365 | + else: |
366 | exclude_services = [] |
367 | |
368 | - services = list(set(self.d.services.keys()) - set(exclude_services)) |
369 | + if include_only: |
370 | + services = include_only |
371 | + else: |
372 | + services = list(set(all_services) - set(exclude_services)) |
373 | + |
374 | + self.log.debug('Waiting up to {}s for extended status on services: ' |
375 | + '{}'.format(timeout, services)) |
376 | service_messages = {service: message for service in services} |
377 | self.d.sentry.wait_for_messages(service_messages, timeout=timeout) |
378 | + self.log.info('OK') |
379 | |
380 | def _get_openstack_release(self): |
381 | """Get openstack release. |
382 | @@ -165,7 +225,8 @@ |
383 | self.precise_havana, self.precise_icehouse, |
384 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
385 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
386 | - self.wily_liberty) = range(12) |
387 | + self.wily_liberty, self.trusty_mitaka, |
388 | + self.xenial_mitaka) = range(14) |
389 | |
390 | releases = { |
391 | ('precise', None): self.precise_essex, |
392 | @@ -177,9 +238,11 @@ |
393 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
394 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
395 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
396 | + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, |
397 | ('utopic', None): self.utopic_juno, |
398 | ('vivid', None): self.vivid_kilo, |
399 | - ('wily', None): self.wily_liberty} |
400 | + ('wily', None): self.wily_liberty, |
401 | + ('xenial', None): self.xenial_mitaka} |
402 | return releases[(self.series, self.openstack)] |
403 | |
404 | def _get_openstack_release_string(self): |
405 | @@ -196,6 +259,7 @@ |
406 | ('utopic', 'juno'), |
407 | ('vivid', 'kilo'), |
408 | ('wily', 'liberty'), |
409 | + ('xenial', 'mitaka'), |
410 | ]) |
411 | if self.openstack: |
412 | os_origin = self.openstack.split(':')[1] |
413 | |
414 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
415 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-10-20 20:38:17 +0000 |
416 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2016-01-11 18:37:19 +0000 |
417 | @@ -18,6 +18,7 @@ |
418 | import json |
419 | import logging |
420 | import os |
421 | +import re |
422 | import six |
423 | import time |
424 | import urllib |
425 | @@ -604,7 +605,22 @@ |
426 | '{}'.format(sample_type, samples)) |
427 | return None |
428 | |
429 | -# rabbitmq/amqp specific helpers: |
430 | + # rabbitmq/amqp specific helpers: |
431 | + |
432 | + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): |
433 | + """Wait for rmq units extended status to show cluster readiness, |
434 | + after an optional initial sleep period. Initial sleep is likely |
435 | + necessary to be effective following a config change, as status |
436 | + message may not instantly update to non-ready.""" |
437 | + |
438 | + if init_sleep: |
439 | + time.sleep(init_sleep) |
440 | + |
441 | + message = re.compile('^Unit is ready and clustered$') |
442 | + deployment._auto_wait_for_status(message=message, |
443 | + timeout=timeout, |
444 | + include_only=['rabbitmq-server']) |
445 | + |
446 | def add_rmq_test_user(self, sentry_units, |
447 | username="testuser1", password="changeme"): |
448 | """Add a test user via the first rmq juju unit, check connection as |
449 | @@ -805,7 +821,10 @@ |
450 | if port: |
451 | config['ssl_port'] = port |
452 | |
453 | - deployment.configure('rabbitmq-server', config) |
454 | + deployment.d.configure('rabbitmq-server', config) |
455 | + |
456 | + # Wait for unit status |
457 | + self.rmq_wait_for_cluster(deployment) |
458 | |
459 | # Confirm |
460 | tries = 0 |
461 | @@ -832,7 +851,10 @@ |
462 | |
463 | # Disable RMQ SSL |
464 | config = {'ssl': 'off'} |
465 | - deployment.configure('rabbitmq-server', config) |
466 | + deployment.d.configure('rabbitmq-server', config) |
467 | + |
468 | + # Wait for unit status |
469 | + self.rmq_wait_for_cluster(deployment) |
470 | |
471 | # Confirm |
472 | tries = 0 |
473 | |
474 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
475 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-10-20 20:38:17 +0000 |
476 | +++ hooks/charmhelpers/contrib/openstack/context.py 2016-01-11 18:37:19 +0000 |
477 | @@ -57,6 +57,7 @@ |
478 | get_nic_hwaddr, |
479 | mkdir, |
480 | write_file, |
481 | + pwgen, |
482 | ) |
483 | from charmhelpers.contrib.hahelpers.cluster import ( |
484 | determine_apache_port, |
485 | @@ -87,6 +88,8 @@ |
486 | is_bridge_member, |
487 | ) |
488 | from charmhelpers.contrib.openstack.utils import get_host_ip |
489 | +from charmhelpers.core.unitdata import kv |
490 | + |
491 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
492 | ADDRESS_TYPES = ['admin', 'internal', 'public'] |
493 | |
494 | @@ -626,15 +629,28 @@ |
495 | if config('haproxy-client-timeout'): |
496 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
497 | |
498 | + if config('haproxy-queue-timeout'): |
499 | + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') |
500 | + |
501 | + if config('haproxy-connect-timeout'): |
502 | + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') |
503 | + |
504 | if config('prefer-ipv6'): |
505 | ctxt['ipv6'] = True |
506 | ctxt['local_host'] = 'ip6-localhost' |
507 | ctxt['haproxy_host'] = '::' |
508 | - ctxt['stat_port'] = ':::8888' |
509 | else: |
510 | ctxt['local_host'] = '127.0.0.1' |
511 | ctxt['haproxy_host'] = '0.0.0.0' |
512 | - ctxt['stat_port'] = ':8888' |
513 | + |
514 | + ctxt['stat_port'] = '8888' |
515 | + |
516 | + db = kv() |
517 | + ctxt['stat_password'] = db.get('stat-password') |
518 | + if not ctxt['stat_password']: |
519 | + ctxt['stat_password'] = db.set('stat-password', |
520 | + pwgen(32)) |
521 | + db.flush() |
522 | |
523 | for frontend in cluster_hosts: |
524 | if (len(cluster_hosts[frontend]['backends']) > 1 or |
525 | @@ -1088,6 +1104,20 @@ |
526 | config_flags_parser(config_flags)} |
527 | |
528 | |
529 | +class LibvirtConfigFlagsContext(OSContextGenerator): |
530 | + """ |
531 | + This context provides support for extending |
532 | + the libvirt section through user-defined flags. |
533 | + """ |
534 | + def __call__(self): |
535 | + ctxt = {} |
536 | + libvirt_flags = config('libvirt-flags') |
537 | + if libvirt_flags: |
538 | + ctxt['libvirt_flags'] = config_flags_parser( |
539 | + libvirt_flags) |
540 | + return ctxt |
541 | + |
542 | + |
543 | class SubordinateConfigContext(OSContextGenerator): |
544 | |
545 | """ |
546 | |
547 | === modified file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh' |
548 | --- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-03-03 06:05:14 +0000 |
549 | +++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2016-01-11 18:37:19 +0000 |
550 | @@ -9,15 +9,17 @@ |
551 | CRITICAL=0 |
552 | NOTACTIVE='' |
553 | LOGFILE=/var/log/nagios/check_haproxy.log |
554 | -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') |
555 | +AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') |
556 | |
557 | -for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); |
558 | +typeset -i N_INSTANCES=0 |
559 | +for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) |
560 | do |
561 | - output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') |
562 | + N_INSTANCES=N_INSTANCES+1 |
563 | + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') |
564 | if [ $? != 0 ]; then |
565 | date >> $LOGFILE |
566 | echo $output >> $LOGFILE |
567 | - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 |
568 | + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 |
569 | CRITICAL=1 |
570 | NOTACTIVE="${NOTACTIVE} $appserver" |
571 | fi |
572 | @@ -28,5 +30,5 @@ |
573 | exit 2 |
574 | fi |
575 | |
576 | -echo "OK: All haproxy instances looking good" |
577 | +echo "OK: All haproxy instances ($N_INSTANCES) looking good" |
578 | exit 0 |
579 | |
580 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' |
581 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-10-20 20:38:17 +0000 |
582 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-01-11 18:37:19 +0000 |
583 | @@ -204,8 +204,8 @@ |
584 | database=config('database'), |
585 | ssl_dir=NEUTRON_CONF_DIR)], |
586 | 'services': [], |
587 | - 'packages': [['plumgrid-lxc'], |
588 | - ['iovisor-dkms']], |
589 | + 'packages': ['plumgrid-lxc', |
590 | + 'iovisor-dkms'], |
591 | 'server_packages': ['neutron-server', |
592 | 'neutron-plugin-plumgrid'], |
593 | 'server_services': ['neutron-server'] |
594 | |
595 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' |
596 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-03-03 06:05:14 +0000 |
597 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-01-11 18:37:19 +0000 |
598 | @@ -12,27 +12,35 @@ |
599 | option tcplog |
600 | option dontlognull |
601 | retries 3 |
602 | - timeout queue 1000 |
603 | - timeout connect 1000 |
604 | -{% if haproxy_client_timeout -%} |
605 | +{%- if haproxy_queue_timeout %} |
606 | + timeout queue {{ haproxy_queue_timeout }} |
607 | +{%- else %} |
608 | + timeout queue 5000 |
609 | +{%- endif %} |
610 | +{%- if haproxy_connect_timeout %} |
611 | + timeout connect {{ haproxy_connect_timeout }} |
612 | +{%- else %} |
613 | + timeout connect 5000 |
614 | +{%- endif %} |
615 | +{%- if haproxy_client_timeout %} |
616 | timeout client {{ haproxy_client_timeout }} |
617 | -{% else -%} |
618 | +{%- else %} |
619 | timeout client 30000 |
620 | -{% endif -%} |
621 | - |
622 | -{% if haproxy_server_timeout -%} |
623 | +{%- endif %} |
624 | +{%- if haproxy_server_timeout %} |
625 | timeout server {{ haproxy_server_timeout }} |
626 | -{% else -%} |
627 | +{%- else %} |
628 | timeout server 30000 |
629 | -{% endif -%} |
630 | +{%- endif %} |
631 | |
632 | -listen stats {{ stat_port }} |
633 | +listen stats |
634 | + bind {{ local_host }}:{{ stat_port }} |
635 | mode http |
636 | stats enable |
637 | stats hide-version |
638 | stats realm Haproxy\ Statistics |
639 | stats uri / |
640 | - stats auth admin:password |
641 | + stats auth admin:{{ stat_password }} |
642 | |
643 | {% if frontends -%} |
644 | {% for service, ports in service_ports.items() -%} |
645 | |
646 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
647 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-10-20 20:38:17 +0000 |
648 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2016-01-11 18:37:19 +0000 |
649 | @@ -26,6 +26,7 @@ |
650 | |
651 | import six |
652 | import traceback |
653 | +import uuid |
654 | import yaml |
655 | |
656 | from charmhelpers.contrib.network import ip |
657 | @@ -41,6 +42,7 @@ |
658 | log as juju_log, |
659 | charm_dir, |
660 | INFO, |
661 | + related_units, |
662 | relation_ids, |
663 | relation_set, |
664 | status_set, |
665 | @@ -84,6 +86,7 @@ |
666 | ('utopic', 'juno'), |
667 | ('vivid', 'kilo'), |
668 | ('wily', 'liberty'), |
669 | + ('xenial', 'mitaka'), |
670 | ]) |
671 | |
672 | |
673 | @@ -97,6 +100,7 @@ |
674 | ('2014.2', 'juno'), |
675 | ('2015.1', 'kilo'), |
676 | ('2015.2', 'liberty'), |
677 | + ('2016.1', 'mitaka'), |
678 | ]) |
679 | |
680 | # The ugly duckling |
681 | @@ -127,31 +131,40 @@ |
682 | # >= Liberty version->codename mapping |
683 | PACKAGE_CODENAMES = { |
684 | 'nova-common': OrderedDict([ |
685 | - ('12.0.0', 'liberty'), |
686 | + ('12.0', 'liberty'), |
687 | + ('13.0', 'mitaka'), |
688 | ]), |
689 | 'neutron-common': OrderedDict([ |
690 | - ('7.0.0', 'liberty'), |
691 | + ('7.0', 'liberty'), |
692 | + ('8.0', 'mitaka'), |
693 | ]), |
694 | 'cinder-common': OrderedDict([ |
695 | - ('7.0.0', 'liberty'), |
696 | + ('7.0', 'liberty'), |
697 | + ('8.0', 'mitaka'), |
698 | ]), |
699 | 'keystone': OrderedDict([ |
700 | - ('8.0.0', 'liberty'), |
701 | + ('8.0', 'liberty'), |
702 | + ('9.0', 'mitaka'), |
703 | ]), |
704 | 'horizon-common': OrderedDict([ |
705 | - ('8.0.0', 'liberty'), |
706 | + ('8.0', 'liberty'), |
707 | + ('9.0', 'mitaka'), |
708 | ]), |
709 | 'ceilometer-common': OrderedDict([ |
710 | - ('5.0.0', 'liberty'), |
711 | + ('5.0', 'liberty'), |
712 | + ('6.0', 'mitaka'), |
713 | ]), |
714 | 'heat-common': OrderedDict([ |
715 | - ('5.0.0', 'liberty'), |
716 | + ('5.0', 'liberty'), |
717 | + ('6.0', 'mitaka'), |
718 | ]), |
719 | 'glance-common': OrderedDict([ |
720 | - ('11.0.0', 'liberty'), |
721 | + ('11.0', 'liberty'), |
722 | + ('12.0', 'mitaka'), |
723 | ]), |
724 | 'openstack-dashboard': OrderedDict([ |
725 | - ('8.0.0', 'liberty'), |
726 | + ('8.0', 'liberty'), |
727 | + ('9.0', 'mitaka'), |
728 | ]), |
729 | } |
730 | |
731 | @@ -238,7 +251,14 @@ |
732 | error_out(e) |
733 | |
734 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
735 | - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) |
736 | + if 'swift' in pkg.name: |
737 | + # Fully x.y.z match for swift versions |
738 | + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) |
739 | + else: |
740 | + # x.y match only for 20XX.X |
741 | + # and ignore patch level for other packages |
742 | + match = re.match('^(\d+)\.(\d+)', vers) |
743 | + |
744 | if match: |
745 | vers = match.group(0) |
746 | |
747 | @@ -250,13 +270,8 @@ |
748 | # < Liberty co-ordinated project versions |
749 | try: |
750 | if 'swift' in pkg.name: |
751 | - swift_vers = vers[:5] |
752 | - if swift_vers not in SWIFT_CODENAMES: |
753 | - # Deal with 1.10.0 upward |
754 | - swift_vers = vers[:6] |
755 | - return SWIFT_CODENAMES[swift_vers] |
756 | + return SWIFT_CODENAMES[vers] |
757 | else: |
758 | - vers = vers[:6] |
759 | return OPENSTACK_CODENAMES[vers] |
760 | except KeyError: |
761 | if not fatal: |
762 | @@ -375,6 +390,9 @@ |
763 | 'liberty': 'trusty-updates/liberty', |
764 | 'liberty/updates': 'trusty-updates/liberty', |
765 | 'liberty/proposed': 'trusty-proposed/liberty', |
766 | + 'mitaka': 'trusty-updates/mitaka', |
767 | + 'mitaka/updates': 'trusty-updates/mitaka', |
768 | + 'mitaka/proposed': 'trusty-proposed/mitaka', |
769 | } |
770 | |
771 | try: |
772 | @@ -575,7 +593,7 @@ |
773 | return yaml.load(projects_yaml) |
774 | |
775 | |
776 | -def git_clone_and_install(projects_yaml, core_project, depth=1): |
777 | +def git_clone_and_install(projects_yaml, core_project): |
778 | """ |
779 | Clone/install all specified OpenStack repositories. |
780 | |
781 | @@ -625,6 +643,9 @@ |
782 | for p in projects['repositories']: |
783 | repo = p['repository'] |
784 | branch = p['branch'] |
785 | + depth = '1' |
786 | + if 'depth' in p.keys(): |
787 | + depth = p['depth'] |
788 | if p['name'] == 'requirements': |
789 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
790 | parent_dir, http_proxy, |
791 | @@ -669,19 +690,13 @@ |
792 | """ |
793 | Clone and install a single git repository. |
794 | """ |
795 | - dest_dir = os.path.join(parent_dir, os.path.basename(repo)) |
796 | - |
797 | if not os.path.exists(parent_dir): |
798 | juju_log('Directory already exists at {}. ' |
799 | 'No need to create directory.'.format(parent_dir)) |
800 | os.mkdir(parent_dir) |
801 | |
802 | - if not os.path.exists(dest_dir): |
803 | - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
804 | - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, |
805 | - depth=depth) |
806 | - else: |
807 | - repo_dir = dest_dir |
808 | + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
809 | + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) |
810 | |
811 | venv = os.path.join(parent_dir, 'venv') |
812 | |
813 | @@ -859,7 +874,9 @@ |
814 | if charm_state != 'active' and charm_state != 'unknown': |
815 | state = workload_state_compare(state, charm_state) |
816 | if message: |
817 | - message = "{} {}".format(message, charm_message) |
818 | + charm_message = charm_message.replace("Incomplete relations: ", |
819 | + "") |
820 | + message = "{}, {}".format(message, charm_message) |
821 | else: |
822 | message = charm_message |
823 | |
824 | @@ -976,3 +993,19 @@ |
825 | action_set({'outcome': 'no upgrade available.'}) |
826 | |
827 | return ret |
828 | + |
829 | + |
830 | +def remote_restart(rel_name, remote_service=None): |
831 | + trigger = { |
832 | + 'restart-trigger': str(uuid.uuid4()), |
833 | + } |
834 | + if remote_service: |
835 | + trigger['remote-service'] = remote_service |
836 | + for rid in relation_ids(rel_name): |
837 | + # This subordinate can be related to two separate services using |
838 | + # different subordinate relations so only issue the restart if |
839 | + # the principal is connected down the relation we think it is |
840 | + if related_units(relid=rid): |
841 | + relation_set(relation_id=rid, |
842 | + relation_settings=trigger, |
843 | + ) |
844 | |
845 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' |
846 | --- hooks/charmhelpers/contrib/python/packages.py 2015-07-29 10:48:05 +0000 |
847 | +++ hooks/charmhelpers/contrib/python/packages.py 2016-01-11 18:37:19 +0000 |
848 | @@ -42,8 +42,12 @@ |
849 | yield "--{0}={1}".format(key, value) |
850 | |
851 | |
852 | -def pip_install_requirements(requirements, **options): |
853 | - """Install a requirements file """ |
854 | +def pip_install_requirements(requirements, constraints=None, **options): |
855 | + """Install a requirements file. |
856 | + |
857 | + :param constraints: Path to pip constraints file. |
858 | + http://pip.readthedocs.org/en/stable/user_guide/#constraints-files |
859 | + """ |
860 | command = ["install"] |
861 | |
862 | available_options = ('proxy', 'src', 'log', ) |
863 | @@ -51,8 +55,13 @@ |
864 | command.append(option) |
865 | |
866 | command.append("-r {0}".format(requirements)) |
867 | - log("Installing from file: {} with options: {}".format(requirements, |
868 | - command)) |
869 | + if constraints: |
870 | + command.append("-c {0}".format(constraints)) |
871 | + log("Installing from file: {} with constraints {} " |
872 | + "and options: {}".format(requirements, constraints, command)) |
873 | + else: |
874 | + log("Installing from file: {} with options: {}".format(requirements, |
875 | + command)) |
876 | pip_execute(command) |
877 | |
878 | |
879 | |
880 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
881 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-10-20 20:38:17 +0000 |
882 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-01-11 18:37:19 +0000 |
883 | @@ -23,6 +23,8 @@ |
884 | # James Page <james.page@ubuntu.com> |
885 | # Adam Gandelman <adamg@ubuntu.com> |
886 | # |
887 | +import bisect |
888 | +import six |
889 | |
890 | import os |
891 | import shutil |
892 | @@ -72,6 +74,394 @@ |
893 | err to syslog = {use_syslog} |
894 | clog to syslog = {use_syslog} |
895 | """ |
896 | +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) |
897 | +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] |
898 | + |
899 | + |
900 | +def validator(value, valid_type, valid_range=None): |
901 | + """ |
902 | + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values |
903 | + Example input: |
904 | + validator(value=1, |
905 | + valid_type=int, |
906 | + valid_range=[0, 2]) |
907 | + This says I'm testing value=1. It must be an int inclusive in [0,2] |
908 | + |
909 | + :param value: The value to validate |
910 | + :param valid_type: The type that value should be. |
911 | + :param valid_range: A range of values that value can assume. |
912 | + :return: |
913 | + """ |
914 | + assert isinstance(value, valid_type), "{} is not a {}".format( |
915 | + value, |
916 | + valid_type) |
917 | + if valid_range is not None: |
918 | + assert isinstance(valid_range, list), \ |
919 | + "valid_range must be a list, was given {}".format(valid_range) |
920 | + # If we're dealing with strings |
921 | + if valid_type is six.string_types: |
922 | + assert value in valid_range, \ |
923 | + "{} is not in the list {}".format(value, valid_range) |
924 | + # Integer, float should have a min and max |
925 | + else: |
926 | + if len(valid_range) != 2: |
927 | + raise ValueError( |
928 | + "Invalid valid_range list of {} for {}. " |
929 | + "List must be [min,max]".format(valid_range, value)) |
930 | + assert value >= valid_range[0], \ |
931 | + "{} is less than minimum allowed value of {}".format( |
932 | + value, valid_range[0]) |
933 | + assert value <= valid_range[1], \ |
934 | + "{} is greater than maximum allowed value of {}".format( |
935 | + value, valid_range[1]) |
936 | + |
937 | + |
938 | +class PoolCreationError(Exception): |
939 | + """ |
940 | + A custom error to inform the caller that a pool creation failed. Provides an error message |
941 | + """ |
942 | + def __init__(self, message): |
943 | + super(PoolCreationError, self).__init__(message) |
944 | + |
945 | + |
946 | +class Pool(object): |
947 | + """ |
948 | + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. |
949 | + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). |
950 | + """ |
951 | + def __init__(self, service, name): |
952 | + self.service = service |
953 | + self.name = name |
954 | + |
955 | + # Create the pool if it doesn't exist already |
956 | + # To be implemented by subclasses |
957 | + def create(self): |
958 | + pass |
959 | + |
960 | + def add_cache_tier(self, cache_pool, mode): |
961 | + """ |
962 | + Adds a new cache tier to an existing pool. |
963 | + :param cache_pool: six.string_types. The cache tier pool name to add. |
964 | + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] |
965 | + :return: None |
966 | + """ |
967 | + # Check the input types and values |
968 | + validator(value=cache_pool, valid_type=six.string_types) |
969 | + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) |
970 | + |
971 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) |
972 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) |
973 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) |
974 | + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) |
975 | + |
976 | + def remove_cache_tier(self, cache_pool): |
977 | + """ |
978 | + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. |
979 | + :param cache_pool: six.string_types. The cache tier pool name to remove. |
980 | + :return: None |
981 | + """ |
982 | + # read-only is easy, writeback is much harder |
983 | + mode = get_cache_mode(cache_pool) |
984 | + if mode == 'readonly': |
985 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) |
986 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) |
987 | + |
988 | + elif mode == 'writeback': |
989 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) |
990 | + # Flush the cache and wait for it to return |
991 | + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) |
992 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) |
993 | + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) |
994 | + |
995 | + def get_pgs(self, pool_size): |
996 | + """ |
997 | + :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for |
998 | + erasure coded pools |
999 | + :return: int. The number of pgs to use. |
1000 | + """ |
1001 | + validator(value=pool_size, valid_type=int) |
1002 | + osds = get_osds(self.service) |
1003 | + if not osds: |
1004 | + # NOTE(james-page): Default to 200 for older ceph versions |
1005 | + # which don't support OSD query from cli |
1006 | + return 200 |
1007 | + |
1008 | + # Calculate based on Ceph best practices |
1009 | + if osds < 5: |
1010 | + return 128 |
1011 | + elif 5 < osds < 10: |
1012 | + return 512 |
1013 | + elif 10 < osds < 50: |
1014 | + return 4096 |
1015 | + else: |
1016 | + estimate = (osds * 100) / pool_size |
1017 | + # Return the next nearest power of 2 |
1018 | + index = bisect.bisect_right(powers_of_two, estimate) |
1019 | + return powers_of_two[index] |
1020 | + |
1021 | + |
1022 | +class ReplicatedPool(Pool): |
1023 | + def __init__(self, service, name, replicas=2): |
1024 | + super(ReplicatedPool, self).__init__(service=service, name=name) |
1025 | + self.replicas = replicas |
1026 | + |
1027 | + def create(self): |
1028 | + if not pool_exists(self.service, self.name): |
1029 | + # Create it |
1030 | + pgs = self.get_pgs(self.replicas) |
1031 | + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] |
1032 | + try: |
1033 | + check_call(cmd) |
1034 | + except CalledProcessError: |
1035 | + raise |
1036 | + |
1037 | + |
1038 | +# Default jerasure erasure coded pool |
1039 | +class ErasurePool(Pool): |
1040 | + def __init__(self, service, name, erasure_code_profile="default"): |
1041 | + super(ErasurePool, self).__init__(service=service, name=name) |
1042 | + self.erasure_code_profile = erasure_code_profile |
1043 | + |
1044 | + def create(self): |
1045 | + if not pool_exists(self.service, self.name): |
1046 | + # Try to find the erasure profile information so we can properly size the pgs |
1047 | + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) |
1048 | + |
1049 | + # Check for errors |
1050 | + if erasure_profile is None: |
1051 | + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), |
1052 | + level=ERROR) |
1053 | + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) |
1054 | + if 'k' not in erasure_profile or 'm' not in erasure_profile: |
1055 | + # Error |
1056 | + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), |
1057 | + level=ERROR) |
1058 | + raise PoolCreationError( |
1059 | + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) |
1060 | + |
1061 | + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) |
1062 | + # Create it |
1063 | + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), |
1064 | + 'erasure', self.erasure_code_profile] |
1065 | + try: |
1066 | + check_call(cmd) |
1067 | + except CalledProcessError: |
1068 | + raise |
1069 | + |
1070 | + """Get an existing erasure code profile if it already exists. |
1071 | + Returns json formatted output""" |
1072 | + |
1073 | + |
1074 | +def get_erasure_profile(service, name): |
1075 | + """ |
1076 | + :param service: six.string_types. The Ceph user name to run the command under |
1077 | + :param name: |
1078 | + :return: |
1079 | + """ |
1080 | + try: |
1081 | + out = check_output(['ceph', '--id', service, |
1082 | + 'osd', 'erasure-code-profile', 'get', |
1083 | + name, '--format=json']) |
1084 | + return json.loads(out) |
1085 | + except (CalledProcessError, OSError, ValueError): |
1086 | + return None |
1087 | + |
1088 | + |
1089 | +def pool_set(service, pool_name, key, value): |
1090 | + """ |
1091 | + Sets a value for a RADOS pool in ceph. |
1092 | + :param service: six.string_types. The Ceph user name to run the command under |
1093 | + :param pool_name: six.string_types |
1094 | + :param key: six.string_types |
1095 | + :param value: |
1096 | + :return: None. Can raise CalledProcessError |
1097 | + """ |
1098 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] |
1099 | + try: |
1100 | + check_call(cmd) |
1101 | + except CalledProcessError: |
1102 | + raise |
1103 | + |
1104 | + |
1105 | +def snapshot_pool(service, pool_name, snapshot_name): |
1106 | + """ |
1107 | + Snapshots a RADOS pool in ceph. |
1108 | + :param service: six.string_types. The Ceph user name to run the command under |
1109 | + :param pool_name: six.string_types |
1110 | + :param snapshot_name: six.string_types |
1111 | + :return: None. Can raise CalledProcessError |
1112 | + """ |
1113 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] |
1114 | + try: |
1115 | + check_call(cmd) |
1116 | + except CalledProcessError: |
1117 | + raise |
1118 | + |
1119 | + |
1120 | +def remove_pool_snapshot(service, pool_name, snapshot_name): |
1121 | + """ |
1122 | + Remove a snapshot from a RADOS pool in ceph. |
1123 | + :param service: six.string_types. The Ceph user name to run the command under |
1124 | + :param pool_name: six.string_types |
1125 | + :param snapshot_name: six.string_types |
1126 | + :return: None. Can raise CalledProcessError |
1127 | + """ |
1128 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] |
1129 | + try: |
1130 | + check_call(cmd) |
1131 | + except CalledProcessError: |
1132 | + raise |
1133 | + |
1134 | + |
1135 | +# max_bytes should be an int or long |
1136 | +def set_pool_quota(service, pool_name, max_bytes): |
1137 | + """ |
1138 | + :param service: six.string_types. The Ceph user name to run the command under |
1139 | + :param pool_name: six.string_types |
1140 | + :param max_bytes: int or long |
1141 | + :return: None. Can raise CalledProcessError |
1142 | + """ |
1143 | + # Set a byte quota on a RADOS pool in ceph. |
1144 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] |
1145 | + try: |
1146 | + check_call(cmd) |
1147 | + except CalledProcessError: |
1148 | + raise |
1149 | + |
1150 | + |
1151 | +def remove_pool_quota(service, pool_name): |
1152 | + """ |
1153 | + Set a byte quota on a RADOS pool in ceph. |
1154 | + :param service: six.string_types. The Ceph user name to run the command under |
1155 | + :param pool_name: six.string_types |
1156 | + :return: None. Can raise CalledProcessError |
1157 | + """ |
1158 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] |
1159 | + try: |
1160 | + check_call(cmd) |
1161 | + except CalledProcessError: |
1162 | + raise |
1163 | + |
1164 | + |
1165 | +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', |
1166 | + data_chunks=2, coding_chunks=1, |
1167 | + locality=None, durability_estimator=None): |
1168 | + """ |
1169 | + Create a new erasure code profile if one does not already exist for it. Updates |
1170 | + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ |
1171 | + for more details |
1172 | + :param service: six.string_types. The Ceph user name to run the command under |
1173 | + :param profile_name: six.string_types |
1174 | + :param erasure_plugin_name: six.string_types |
1175 | + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', |
1176 | + 'room', 'root', 'row']) |
1177 | + :param data_chunks: int |
1178 | + :param coding_chunks: int |
1179 | + :param locality: int |
1180 | + :param durability_estimator: int |
1181 | + :return: None. Can raise CalledProcessError |
1182 | + """ |
1183 | + # Ensure this failure_domain is allowed by Ceph |
1184 | + validator(failure_domain, six.string_types, |
1185 | + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) |
1186 | + |
1187 | + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, |
1188 | + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), |
1189 | + 'ruleset_failure_domain=' + failure_domain] |
1190 | + if locality is not None and durability_estimator is not None: |
1191 | + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") |
1192 | + |
1193 | + # Add plugin specific information |
1194 | + if locality is not None: |
1195 | + # For local erasure codes |
1196 | + cmd.append('l=' + str(locality)) |
1197 | + if durability_estimator is not None: |
1198 | + # For Shec erasure codes |
1199 | + cmd.append('c=' + str(durability_estimator)) |
1200 | + |
1201 | + if erasure_profile_exists(service, profile_name): |
1202 | + cmd.append('--force') |
1203 | + |
1204 | + try: |
1205 | + check_call(cmd) |
1206 | + except CalledProcessError: |
1207 | + raise |
1208 | + |
1209 | + |
1210 | +def rename_pool(service, old_name, new_name): |
1211 | + """ |
1212 | + Rename a Ceph pool from old_name to new_name |
1213 | + :param service: six.string_types. The Ceph user name to run the command under |
1214 | + :param old_name: six.string_types |
1215 | + :param new_name: six.string_types |
1216 | + :return: None |
1217 | + """ |
1218 | + validator(value=old_name, valid_type=six.string_types) |
1219 | + validator(value=new_name, valid_type=six.string_types) |
1220 | + |
1221 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] |
1222 | + check_call(cmd) |
1223 | + |
1224 | + |
1225 | +def erasure_profile_exists(service, name): |
1226 | + """ |
1227 | + Check to see if an Erasure code profile already exists. |
1228 | + :param service: six.string_types. The Ceph user name to run the command under |
1229 | + :param name: six.string_types |
1230 | + :return: int or None |
1231 | + """ |
1232 | + validator(value=name, valid_type=six.string_types) |
1233 | + try: |
1234 | + check_call(['ceph', '--id', service, |
1235 | + 'osd', 'erasure-code-profile', 'get', |
1236 | + name]) |
1237 | + return True |
1238 | + except CalledProcessError: |
1239 | + return False |
1240 | + |
1241 | + |
1242 | +def get_cache_mode(service, pool_name): |
1243 | + """ |
1244 | + Find the current caching mode of the pool_name given. |
1245 | + :param service: six.string_types. The Ceph user name to run the command under |
1246 | + :param pool_name: six.string_types |
1247 | + :return: int or None |
1248 | + """ |
1249 | + validator(value=service, valid_type=six.string_types) |
1250 | + validator(value=pool_name, valid_type=six.string_types) |
1251 | + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) |
1252 | + try: |
1253 | + osd_json = json.loads(out) |
1254 | + for pool in osd_json['pools']: |
1255 | + if pool['pool_name'] == pool_name: |
1256 | + return pool['cache_mode'] |
1257 | + return None |
1258 | + except ValueError: |
1259 | + raise |
1260 | + |
1261 | + |
1262 | +def pool_exists(service, name): |
1263 | + """Check to see if a RADOS pool already exists.""" |
1264 | + try: |
1265 | + out = check_output(['rados', '--id', service, |
1266 | + 'lspools']).decode('UTF-8') |
1267 | + except CalledProcessError: |
1268 | + return False |
1269 | + |
1270 | + return name in out |
1271 | + |
1272 | + |
1273 | +def get_osds(service): |
1274 | + """Return a list of all Ceph Object Storage Daemons currently in the |
1275 | + cluster. |
1276 | + """ |
1277 | + version = ceph_version() |
1278 | + if version and version >= '0.56': |
1279 | + return json.loads(check_output(['ceph', '--id', service, |
1280 | + 'osd', 'ls', |
1281 | + '--format=json']).decode('UTF-8')) |
1282 | + |
1283 | + return None |
1284 | |
1285 | |
1286 | def install(): |
1287 | @@ -101,53 +491,37 @@ |
1288 | check_call(cmd) |
1289 | |
1290 | |
1291 | -def pool_exists(service, name): |
1292 | - """Check to see if a RADOS pool already exists.""" |
1293 | - try: |
1294 | - out = check_output(['rados', '--id', service, |
1295 | - 'lspools']).decode('UTF-8') |
1296 | - except CalledProcessError: |
1297 | - return False |
1298 | - |
1299 | - return name in out |
1300 | - |
1301 | - |
1302 | -def get_osds(service): |
1303 | - """Return a list of all Ceph Object Storage Daemons currently in the |
1304 | - cluster. |
1305 | - """ |
1306 | - version = ceph_version() |
1307 | - if version and version >= '0.56': |
1308 | - return json.loads(check_output(['ceph', '--id', service, |
1309 | - 'osd', 'ls', |
1310 | - '--format=json']).decode('UTF-8')) |
1311 | - |
1312 | - return None |
1313 | - |
1314 | - |
1315 | -def create_pool(service, name, replicas=3): |
1316 | +def update_pool(client, pool, settings): |
1317 | + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] |
1318 | + for k, v in six.iteritems(settings): |
1319 | + cmd.append(k) |
1320 | + cmd.append(v) |
1321 | + |
1322 | + check_call(cmd) |
1323 | + |
1324 | + |
1325 | +def create_pool(service, name, replicas=3, pg_num=None): |
1326 | """Create a new RADOS pool.""" |
1327 | if pool_exists(service, name): |
1328 | log("Ceph pool {} already exists, skipping creation".format(name), |
1329 | level=WARNING) |
1330 | return |
1331 | |
1332 | - # Calculate the number of placement groups based |
1333 | - # on upstream recommended best practices. |
1334 | - osds = get_osds(service) |
1335 | - if osds: |
1336 | - pgnum = (len(osds) * 100 // replicas) |
1337 | - else: |
1338 | - # NOTE(james-page): Default to 200 for older ceph versions |
1339 | - # which don't support OSD query from cli |
1340 | - pgnum = 200 |
1341 | - |
1342 | - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] |
1343 | - check_call(cmd) |
1344 | - |
1345 | - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', |
1346 | - str(replicas)] |
1347 | - check_call(cmd) |
1348 | + if not pg_num: |
1349 | + # Calculate the number of placement groups based |
1350 | + # on upstream recommended best practices. |
1351 | + osds = get_osds(service) |
1352 | + if osds: |
1353 | + pg_num = (len(osds) * 100 // replicas) |
1354 | + else: |
1355 | + # NOTE(james-page): Default to 200 for older ceph versions |
1356 | + # which don't support OSD query from cli |
1357 | + pg_num = 200 |
1358 | + |
1359 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] |
1360 | + check_call(cmd) |
1361 | + |
1362 | + update_pool(service, name, settings={'size': str(replicas)}) |
1363 | |
1364 | |
1365 | def delete_pool(service, name): |
1366 | @@ -202,10 +576,10 @@ |
1367 | log('Created new keyfile at %s.' % keyfile, level=INFO) |
1368 | |
1369 | |
1370 | -def get_ceph_nodes(): |
1371 | - """Query named relation 'ceph' to determine current nodes.""" |
1372 | +def get_ceph_nodes(relation='ceph'): |
1373 | + """Query named relation to determine current nodes.""" |
1374 | hosts = [] |
1375 | - for r_id in relation_ids('ceph'): |
1376 | + for r_id in relation_ids(relation): |
1377 | for unit in related_units(r_id): |
1378 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
1379 | |
1380 | @@ -357,14 +731,14 @@ |
1381 | service_start(svc) |
1382 | |
1383 | |
1384 | -def ensure_ceph_keyring(service, user=None, group=None): |
1385 | +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): |
1386 | """Ensures a ceph keyring is created for a named service and optionally |
1387 | ensures user and group ownership. |
1388 | |
1389 | Returns False if no ceph key is available in relation state. |
1390 | """ |
1391 | key = None |
1392 | - for rid in relation_ids('ceph'): |
1393 | + for rid in relation_ids(relation): |
1394 | for unit in related_units(rid): |
1395 | key = relation_get('key', rid=rid, unit=unit) |
1396 | if key: |
1397 | @@ -405,6 +779,7 @@ |
1398 | |
1399 | The API is versioned and defaults to version 1. |
1400 | """ |
1401 | + |
1402 | def __init__(self, api_version=1, request_id=None): |
1403 | self.api_version = api_version |
1404 | if request_id: |
1405 | @@ -413,9 +788,16 @@ |
1406 | self.request_id = str(uuid.uuid1()) |
1407 | self.ops = [] |
1408 | |
1409 | - def add_op_create_pool(self, name, replica_count=3): |
1410 | + def add_op_create_pool(self, name, replica_count=3, pg_num=None): |
1411 | + """Adds an operation to create a pool. |
1412 | + |
1413 | + @param pg_num setting: optional setting. If not provided, this value |
1414 | + will be calculated by the broker based on how many OSDs are in the |
1415 | + cluster at the time of creation. Note that, if provided, this value |
1416 | + will be capped at the current available maximum. |
1417 | + """ |
1418 | self.ops.append({'op': 'create-pool', 'name': name, |
1419 | - 'replicas': replica_count}) |
1420 | + 'replicas': replica_count, 'pg_num': pg_num}) |
1421 | |
1422 | def set_ops(self, ops): |
1423 | """Set request ops to provided value. |
1424 | @@ -433,8 +815,8 @@ |
1425 | def _ops_equal(self, other): |
1426 | if len(self.ops) == len(other.ops): |
1427 | for req_no in range(0, len(self.ops)): |
1428 | - for key in ['replicas', 'name', 'op']: |
1429 | - if self.ops[req_no][key] != other.ops[req_no][key]: |
1430 | + for key in ['replicas', 'name', 'op', 'pg_num']: |
1431 | + if self.ops[req_no].get(key) != other.ops[req_no].get(key): |
1432 | return False |
1433 | else: |
1434 | return False |
1435 | @@ -540,7 +922,7 @@ |
1436 | return request |
1437 | |
1438 | |
1439 | -def get_request_states(request): |
1440 | +def get_request_states(request, relation='ceph'): |
1441 | """Return a dict of requests per relation id with their corresponding |
1442 | completion state. |
1443 | |
1444 | @@ -552,7 +934,7 @@ |
1445 | """ |
1446 | complete = [] |
1447 | requests = {} |
1448 | - for rid in relation_ids('ceph'): |
1449 | + for rid in relation_ids(relation): |
1450 | complete = False |
1451 | previous_request = get_previous_request(rid) |
1452 | if request == previous_request: |
1453 | @@ -570,14 +952,14 @@ |
1454 | return requests |
1455 | |
1456 | |
1457 | -def is_request_sent(request): |
1458 | +def is_request_sent(request, relation='ceph'): |
1459 | """Check to see if a functionally equivalent request has already been sent |
1460 | |
1461 | Returns True if a similar request has been sent |
1462 | |
1463 | @param request: A CephBrokerRq object |
1464 | """ |
1465 | - states = get_request_states(request) |
1466 | + states = get_request_states(request, relation=relation) |
1467 | for rid in states.keys(): |
1468 | if not states[rid]['sent']: |
1469 | return False |
1470 | @@ -585,7 +967,7 @@ |
1471 | return True |
1472 | |
1473 | |
1474 | -def is_request_complete(request): |
1475 | +def is_request_complete(request, relation='ceph'): |
1476 | """Check to see if a functionally equivalent request has already been |
1477 | completed |
1478 | |
1479 | @@ -593,7 +975,7 @@ |
1480 | |
1481 | @param request: A CephBrokerRq object |
1482 | """ |
1483 | - states = get_request_states(request) |
1484 | + states = get_request_states(request, relation=relation) |
1485 | for rid in states.keys(): |
1486 | if not states[rid]['complete']: |
1487 | return False |
1488 | @@ -643,15 +1025,15 @@ |
1489 | return 'broker-rsp-' + local_unit().replace('/', '-') |
1490 | |
1491 | |
1492 | -def send_request_if_needed(request): |
1493 | +def send_request_if_needed(request, relation='ceph'): |
1494 | """Send broker request if an equivalent request has not already been sent |
1495 | |
1496 | @param request: A CephBrokerRq object |
1497 | """ |
1498 | - if is_request_sent(request): |
1499 | + if is_request_sent(request, relation=relation): |
1500 | log('Request already sent but not complete, not sending new request', |
1501 | level=DEBUG) |
1502 | else: |
1503 | - for rid in relation_ids('ceph'): |
1504 | + for rid in relation_ids(relation): |
1505 | log('Sending request {}'.format(request.request_id), level=DEBUG) |
1506 | relation_set(relation_id=rid, broker_req=request.request) |
1507 | |
1508 | === modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' |
1509 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-26 09:45:59 +0000 |
1510 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-01-11 18:37:19 +0000 |
1511 | @@ -76,3 +76,13 @@ |
1512 | check_call(cmd) |
1513 | |
1514 | return create_loopback(path) |
1515 | + |
1516 | + |
1517 | +def is_mapped_loopback_device(device): |
1518 | + """ |
1519 | + Checks if a given device name is an existing/mapped loopback device. |
1520 | + :param device: str: Full path to the device (eg, /dev/loop1). |
1521 | + :returns: str: Path to the backing file if is a loopback device |
1522 | + empty string otherwise |
1523 | + """ |
1524 | + return loopback_devices().get(device, "") |
1525 | |
1526 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
1527 | --- hooks/charmhelpers/core/hookenv.py 2015-10-20 20:38:17 +0000 |
1528 | +++ hooks/charmhelpers/core/hookenv.py 2016-01-11 18:37:19 +0000 |
1529 | @@ -491,6 +491,19 @@ |
1530 | |
1531 | |
1532 | @cached |
1533 | +def peer_relation_id(): |
1534 | + '''Get the peers relation id if a peers relation has been joined, else None.''' |
1535 | + md = metadata() |
1536 | + section = md.get('peers') |
1537 | + if section: |
1538 | + for key in section: |
1539 | + relids = relation_ids(key) |
1540 | + if relids: |
1541 | + return relids[0] |
1542 | + return None |
1543 | + |
1544 | + |
1545 | +@cached |
1546 | def relation_to_interface(relation_name): |
1547 | """ |
1548 | Given the name of a relation, return the interface that relation uses. |
1549 | @@ -504,12 +517,12 @@ |
1550 | def relation_to_role_and_interface(relation_name): |
1551 | """ |
1552 | Given the name of a relation, return the role and the name of the interface |
1553 | - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). |
1554 | + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). |
1555 | |
1556 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
1557 | """ |
1558 | _metadata = metadata() |
1559 | - for role in ('provides', 'requires', 'peer'): |
1560 | + for role in ('provides', 'requires', 'peers'): |
1561 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
1562 | if interface: |
1563 | return role, interface |
1564 | @@ -521,7 +534,7 @@ |
1565 | """ |
1566 | Given a role and interface name, return a list of relation names for the |
1567 | current charm that use that interface under that role (where role is one |
1568 | - of ``provides``, ``requires``, or ``peer``). |
1569 | + of ``provides``, ``requires``, or ``peers``). |
1570 | |
1571 | :returns: A list of relation names. |
1572 | """ |
1573 | @@ -542,7 +555,7 @@ |
1574 | :returns: A list of relation names. |
1575 | """ |
1576 | results = [] |
1577 | - for role in ('provides', 'requires', 'peer'): |
1578 | + for role in ('provides', 'requires', 'peers'): |
1579 | results.extend(role_and_interface_to_relations(role, interface_name)) |
1580 | return results |
1581 | |
1582 | @@ -624,7 +637,7 @@ |
1583 | |
1584 | |
1585 | @cached |
1586 | -def storage_get(attribute="", storage_id=""): |
1587 | +def storage_get(attribute=None, storage_id=None): |
1588 | """Get storage attributes""" |
1589 | _args = ['storage-get', '--format=json'] |
1590 | if storage_id: |
1591 | @@ -638,7 +651,7 @@ |
1592 | |
1593 | |
1594 | @cached |
1595 | -def storage_list(storage_name=""): |
1596 | +def storage_list(storage_name=None): |
1597 | """List the storage IDs for the unit""" |
1598 | _args = ['storage-list', '--format=json'] |
1599 | if storage_name: |
1600 | @@ -820,6 +833,7 @@ |
1601 | |
1602 | def translate_exc(from_exc, to_exc): |
1603 | def inner_translate_exc1(f): |
1604 | + @wraps(f) |
1605 | def inner_translate_exc2(*args, **kwargs): |
1606 | try: |
1607 | return f(*args, **kwargs) |
1608 | @@ -864,6 +878,40 @@ |
1609 | subprocess.check_call(cmd) |
1610 | |
1611 | |
1612 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
1613 | +def payload_register(ptype, klass, pid): |
1614 | + """ is used while a hook is running to let Juju know that a |
1615 | + payload has been started.""" |
1616 | + cmd = ['payload-register'] |
1617 | + for x in [ptype, klass, pid]: |
1618 | + cmd.append(x) |
1619 | + subprocess.check_call(cmd) |
1620 | + |
1621 | + |
1622 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
1623 | +def payload_unregister(klass, pid): |
1624 | + """ is used while a hook is running to let Juju know |
1625 | + that a payload has been manually stopped. The <class> and <id> provided |
1626 | + must match a payload that has been previously registered with juju using |
1627 | + payload-register.""" |
1628 | + cmd = ['payload-unregister'] |
1629 | + for x in [klass, pid]: |
1630 | + cmd.append(x) |
1631 | + subprocess.check_call(cmd) |
1632 | + |
1633 | + |
1634 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
1635 | +def payload_status_set(klass, pid, status): |
1636 | + """is used to update the current status of a registered payload. |
1637 | + The <class> and <id> provided must match a payload that has been previously |
1638 | + registered with juju using payload-register. The <status> must be one of the |
1639 | + follow: starting, started, stopping, stopped""" |
1640 | + cmd = ['payload-status-set'] |
1641 | + for x in [klass, pid, status]: |
1642 | + cmd.append(x) |
1643 | + subprocess.check_call(cmd) |
1644 | + |
1645 | + |
1646 | @cached |
1647 | def juju_version(): |
1648 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
1649 | |
1650 | === modified file 'hooks/charmhelpers/core/host.py' |
1651 | --- hooks/charmhelpers/core/host.py 2015-10-20 20:38:17 +0000 |
1652 | +++ hooks/charmhelpers/core/host.py 2016-01-11 18:37:19 +0000 |
1653 | @@ -67,10 +67,14 @@ |
1654 | """Pause a system service. |
1655 | |
1656 | Stop it, and prevent it from starting again at boot.""" |
1657 | - stopped = service_stop(service_name) |
1658 | + stopped = True |
1659 | + if service_running(service_name): |
1660 | + stopped = service_stop(service_name) |
1661 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
1662 | sysv_file = os.path.join(initd_dir, service_name) |
1663 | - if os.path.exists(upstart_file): |
1664 | + if init_is_systemd(): |
1665 | + service('disable', service_name) |
1666 | + elif os.path.exists(upstart_file): |
1667 | override_path = os.path.join( |
1668 | init_dir, '{}.override'.format(service_name)) |
1669 | with open(override_path, 'w') as fh: |
1670 | @@ -78,9 +82,9 @@ |
1671 | elif os.path.exists(sysv_file): |
1672 | subprocess.check_call(["update-rc.d", service_name, "disable"]) |
1673 | else: |
1674 | - # XXX: Support SystemD too |
1675 | raise ValueError( |
1676 | - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( |
1677 | + "Unable to detect {0} as SystemD, Upstart {1} or" |
1678 | + " SysV {2}".format( |
1679 | service_name, upstart_file, sysv_file)) |
1680 | return stopped |
1681 | |
1682 | @@ -92,7 +96,9 @@ |
1683 | Reenable starting again at boot. Start the service""" |
1684 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
1685 | sysv_file = os.path.join(initd_dir, service_name) |
1686 | - if os.path.exists(upstart_file): |
1687 | + if init_is_systemd(): |
1688 | + service('enable', service_name) |
1689 | + elif os.path.exists(upstart_file): |
1690 | override_path = os.path.join( |
1691 | init_dir, '{}.override'.format(service_name)) |
1692 | if os.path.exists(override_path): |
1693 | @@ -100,34 +106,42 @@ |
1694 | elif os.path.exists(sysv_file): |
1695 | subprocess.check_call(["update-rc.d", service_name, "enable"]) |
1696 | else: |
1697 | - # XXX: Support SystemD too |
1698 | raise ValueError( |
1699 | - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( |
1700 | + "Unable to detect {0} as SystemD, Upstart {1} or" |
1701 | + " SysV {2}".format( |
1702 | service_name, upstart_file, sysv_file)) |
1703 | |
1704 | - started = service_start(service_name) |
1705 | + started = service_running(service_name) |
1706 | + if not started: |
1707 | + started = service_start(service_name) |
1708 | return started |
1709 | |
1710 | |
1711 | def service(action, service_name): |
1712 | """Control a system service""" |
1713 | - cmd = ['service', service_name, action] |
1714 | + if init_is_systemd(): |
1715 | + cmd = ['systemctl', action, service_name] |
1716 | + else: |
1717 | + cmd = ['service', service_name, action] |
1718 | return subprocess.call(cmd) == 0 |
1719 | |
1720 | |
1721 | -def service_running(service): |
1722 | +def service_running(service_name): |
1723 | """Determine whether a system service is running""" |
1724 | - try: |
1725 | - output = subprocess.check_output( |
1726 | - ['service', service, 'status'], |
1727 | - stderr=subprocess.STDOUT).decode('UTF-8') |
1728 | - except subprocess.CalledProcessError: |
1729 | - return False |
1730 | + if init_is_systemd(): |
1731 | + return service('is-active', service_name) |
1732 | else: |
1733 | - if ("start/running" in output or "is running" in output): |
1734 | - return True |
1735 | - else: |
1736 | + try: |
1737 | + output = subprocess.check_output( |
1738 | + ['service', service_name, 'status'], |
1739 | + stderr=subprocess.STDOUT).decode('UTF-8') |
1740 | + except subprocess.CalledProcessError: |
1741 | return False |
1742 | + else: |
1743 | + if ("start/running" in output or "is running" in output): |
1744 | + return True |
1745 | + else: |
1746 | + return False |
1747 | |
1748 | |
1749 | def service_available(service_name): |
1750 | @@ -142,8 +156,29 @@ |
1751 | return True |
1752 | |
1753 | |
1754 | -def adduser(username, password=None, shell='/bin/bash', system_user=False): |
1755 | - """Add a user to the system""" |
1756 | +SYSTEMD_SYSTEM = '/run/systemd/system' |
1757 | + |
1758 | + |
1759 | +def init_is_systemd(): |
1760 | + return os.path.isdir(SYSTEMD_SYSTEM) |
1761 | + |
1762 | + |
1763 | +def adduser(username, password=None, shell='/bin/bash', system_user=False, |
1764 | + primary_group=None, secondary_groups=None): |
1765 | + """ |
1766 | + Add a user to the system. |
1767 | + |
1768 | + Will log but otherwise succeed if the user already exists. |
1769 | + |
1770 | + :param str username: Username to create |
1771 | + :param str password: Password for user; if ``None``, create a system user |
1772 | + :param str shell: The default shell for the user |
1773 | + :param bool system_user: Whether to create a login or system user |
1774 | + :param str primary_group: Primary group for user; defaults to their username |
1775 | + :param list secondary_groups: Optional list of additional groups |
1776 | + |
1777 | + :returns: The password database entry struct, as returned by `pwd.getpwnam` |
1778 | + """ |
1779 | try: |
1780 | user_info = pwd.getpwnam(username) |
1781 | log('user {0} already exists!'.format(username)) |
1782 | @@ -158,6 +193,16 @@ |
1783 | '--shell', shell, |
1784 | '--password', password, |
1785 | ]) |
1786 | + if not primary_group: |
1787 | + try: |
1788 | + grp.getgrnam(username) |
1789 | + primary_group = username # avoid "group exists" error |
1790 | + except KeyError: |
1791 | + pass |
1792 | + if primary_group: |
1793 | + cmd.extend(['-g', primary_group]) |
1794 | + if secondary_groups: |
1795 | + cmd.extend(['-G', ','.join(secondary_groups)]) |
1796 | cmd.append(username) |
1797 | subprocess.check_call(cmd) |
1798 | user_info = pwd.getpwnam(username) |
1799 | @@ -595,3 +640,19 @@ |
1800 | |
1801 | def lchownr(path, owner, group): |
1802 | chownr(path, owner, group, follow_links=False) |
1803 | + |
1804 | + |
1805 | +def get_total_ram(): |
1806 | + '''The total amount of system RAM in bytes. |
1807 | + |
1808 | + This is what is reported by the OS, and may be overcommitted when |
1809 | + there are multiple containers hosted on the same machine. |
1810 | + ''' |
1811 | + with open('/proc/meminfo', 'r') as f: |
1812 | + for line in f.readlines(): |
1813 | + if line: |
1814 | + key, value, unit = line.split() |
1815 | + if key == 'MemTotal:': |
1816 | + assert unit == 'kB', 'Unknown unit' |
1817 | + return int(value) * 1024 # Classic, not KiB. |
1818 | + raise NotImplementedError() |
1819 | |
1820 | === modified file 'hooks/charmhelpers/core/services/helpers.py' |
1821 | --- hooks/charmhelpers/core/services/helpers.py 2015-08-19 13:49:53 +0000 |
1822 | +++ hooks/charmhelpers/core/services/helpers.py 2016-01-11 18:37:19 +0000 |
1823 | @@ -243,33 +243,40 @@ |
1824 | :param str source: The template source file, relative to |
1825 | `$CHARM_DIR/templates` |
1826 | |
1827 | - :param str target: The target to write the rendered template to |
1828 | + :param str target: The target to write the rendered template to (or None) |
1829 | :param str owner: The owner of the rendered file |
1830 | :param str group: The group of the rendered file |
1831 | :param int perms: The permissions of the rendered file |
1832 | :param partial on_change_action: functools partial to be executed when |
1833 | rendered file changes |
1834 | + :param jinja2 loader template_loader: A jinja2 template loader |
1835 | + |
1836 | + :return str: The rendered template |
1837 | """ |
1838 | def __init__(self, source, target, |
1839 | owner='root', group='root', perms=0o444, |
1840 | - on_change_action=None): |
1841 | + on_change_action=None, template_loader=None): |
1842 | self.source = source |
1843 | self.target = target |
1844 | self.owner = owner |
1845 | self.group = group |
1846 | self.perms = perms |
1847 | self.on_change_action = on_change_action |
1848 | + self.template_loader = template_loader |
1849 | |
1850 | def __call__(self, manager, service_name, event_name): |
1851 | pre_checksum = '' |
1852 | if self.on_change_action and os.path.isfile(self.target): |
1853 | pre_checksum = host.file_hash(self.target) |
1854 | service = manager.get_service(service_name) |
1855 | - context = {} |
1856 | + context = {'ctx': {}} |
1857 | for ctx in service.get('required_data', []): |
1858 | context.update(ctx) |
1859 | - templating.render(self.source, self.target, context, |
1860 | - self.owner, self.group, self.perms) |
1861 | + context['ctx'].update(ctx) |
1862 | + |
1863 | + result = templating.render(self.source, self.target, context, |
1864 | + self.owner, self.group, self.perms, |
1865 | + template_loader=self.template_loader) |
1866 | if self.on_change_action: |
1867 | if pre_checksum == host.file_hash(self.target): |
1868 | hookenv.log( |
1869 | @@ -278,6 +285,8 @@ |
1870 | else: |
1871 | self.on_change_action() |
1872 | |
1873 | + return result |
1874 | + |
1875 | |
1876 | # Convenience aliases for templates |
1877 | render_template = template = TemplateCallback |
1878 | |
1879 | === modified file 'hooks/charmhelpers/core/templating.py' |
1880 | --- hooks/charmhelpers/core/templating.py 2015-03-26 17:59:44 +0000 |
1881 | +++ hooks/charmhelpers/core/templating.py 2016-01-11 18:37:19 +0000 |
1882 | @@ -21,13 +21,14 @@ |
1883 | |
1884 | |
1885 | def render(source, target, context, owner='root', group='root', |
1886 | - perms=0o444, templates_dir=None, encoding='UTF-8'): |
1887 | + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): |
1888 | """ |
1889 | Render a template. |
1890 | |
1891 | The `source` path, if not absolute, is relative to the `templates_dir`. |
1892 | |
1893 | - The `target` path should be absolute. |
1894 | + The `target` path should be absolute. It can also be `None`, in which |
1895 | + case no file will be written. |
1896 | |
1897 | The context should be a dict containing the values to be replaced in the |
1898 | template. |
1899 | @@ -36,6 +37,9 @@ |
1900 | |
1901 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. |
1902 | |
1903 | + The rendered template will be written to the file as well as being returned |
1904 | + as a string. |
1905 | + |
1906 | Note: Using this requires python-jinja2; if it is not installed, calling |
1907 | this will attempt to use charmhelpers.fetch.apt_install to install it. |
1908 | """ |
1909 | @@ -52,17 +56,26 @@ |
1910 | apt_install('python-jinja2', fatal=True) |
1911 | from jinja2 import FileSystemLoader, Environment, exceptions |
1912 | |
1913 | - if templates_dir is None: |
1914 | - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') |
1915 | - loader = Environment(loader=FileSystemLoader(templates_dir)) |
1916 | + if template_loader: |
1917 | + template_env = Environment(loader=template_loader) |
1918 | + else: |
1919 | + if templates_dir is None: |
1920 | + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') |
1921 | + template_env = Environment(loader=FileSystemLoader(templates_dir)) |
1922 | try: |
1923 | source = source |
1924 | - template = loader.get_template(source) |
1925 | + template = template_env.get_template(source) |
1926 | except exceptions.TemplateNotFound as e: |
1927 | hookenv.log('Could not load template %s from %s.' % |
1928 | (source, templates_dir), |
1929 | level=hookenv.ERROR) |
1930 | raise e |
1931 | content = template.render(context) |
1932 | - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) |
1933 | - host.write_file(target, content.encode(encoding), owner, group, perms) |
1934 | + if target is not None: |
1935 | + target_dir = os.path.dirname(target) |
1936 | + if not os.path.exists(target_dir): |
1937 | + # This is a terrible default directory permission, as the file |
1938 | + # or its siblings will often contain secrets. |
1939 | + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) |
1940 | + host.write_file(target, content.encode(encoding), owner, group, perms) |
1941 | + return content |
1942 | |
1943 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
1944 | --- hooks/charmhelpers/fetch/__init__.py 2015-08-19 13:49:53 +0000 |
1945 | +++ hooks/charmhelpers/fetch/__init__.py 2016-01-11 18:37:19 +0000 |
1946 | @@ -98,6 +98,14 @@ |
1947 | 'liberty/proposed': 'trusty-proposed/liberty', |
1948 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
1949 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
1950 | + # Mitaka |
1951 | + 'mitaka': 'trusty-updates/mitaka', |
1952 | + 'trusty-mitaka': 'trusty-updates/mitaka', |
1953 | + 'trusty-mitaka/updates': 'trusty-updates/mitaka', |
1954 | + 'trusty-updates/mitaka': 'trusty-updates/mitaka', |
1955 | + 'mitaka/proposed': 'trusty-proposed/mitaka', |
1956 | + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', |
1957 | + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', |
1958 | } |
1959 | |
1960 | # The order of this list is very important. Handlers should be listed in from |
1961 | @@ -225,12 +233,12 @@ |
1962 | |
1963 | def apt_mark(packages, mark, fatal=False): |
1964 | """Flag one or more packages using apt-mark""" |
1965 | + log("Marking {} as {}".format(packages, mark)) |
1966 | cmd = ['apt-mark', mark] |
1967 | if isinstance(packages, six.string_types): |
1968 | cmd.append(packages) |
1969 | else: |
1970 | cmd.extend(packages) |
1971 | - log("Holding {}".format(packages)) |
1972 | |
1973 | if fatal: |
1974 | subprocess.check_call(cmd, universal_newlines=True) |
1975 | @@ -411,7 +419,7 @@ |
1976 | importlib.import_module(package), |
1977 | classname) |
1978 | plugin_list.append(handler_class()) |
1979 | - except (ImportError, AttributeError): |
1980 | + except NotImplementedError: |
1981 | # Skip missing plugins so that they can be ommitted from |
1982 | # installation if desired |
1983 | log("FetchHandler {} not found, skipping plugin".format( |
1984 | |
1985 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
1986 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-07-29 10:48:05 +0000 |
1987 | +++ hooks/charmhelpers/fetch/archiveurl.py 2016-01-11 18:37:19 +0000 |
1988 | @@ -108,7 +108,7 @@ |
1989 | install_opener(opener) |
1990 | response = urlopen(source) |
1991 | try: |
1992 | - with open(dest, 'w') as dest_file: |
1993 | + with open(dest, 'wb') as dest_file: |
1994 | dest_file.write(response.read()) |
1995 | except Exception as e: |
1996 | if os.path.isfile(dest): |
1997 | |
1998 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' |
1999 | --- hooks/charmhelpers/fetch/bzrurl.py 2015-01-26 09:45:59 +0000 |
2000 | +++ hooks/charmhelpers/fetch/bzrurl.py 2016-01-11 18:37:19 +0000 |
2001 | @@ -15,60 +15,50 @@ |
2002 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2003 | |
2004 | import os |
2005 | +from subprocess import check_call |
2006 | from charmhelpers.fetch import ( |
2007 | BaseFetchHandler, |
2008 | - UnhandledSource |
2009 | + UnhandledSource, |
2010 | + filter_installed_packages, |
2011 | + apt_install, |
2012 | ) |
2013 | from charmhelpers.core.host import mkdir |
2014 | |
2015 | -import six |
2016 | -if six.PY3: |
2017 | - raise ImportError('bzrlib does not support Python3') |
2018 | |
2019 | -try: |
2020 | - from bzrlib.branch import Branch |
2021 | - from bzrlib import bzrdir, workingtree, errors |
2022 | -except ImportError: |
2023 | - from charmhelpers.fetch import apt_install |
2024 | - apt_install("python-bzrlib") |
2025 | - from bzrlib.branch import Branch |
2026 | - from bzrlib import bzrdir, workingtree, errors |
2027 | +if filter_installed_packages(['bzr']) != []: |
2028 | + apt_install(['bzr']) |
2029 | + if filter_installed_packages(['bzr']) != []: |
2030 | + raise NotImplementedError('Unable to install bzr') |
2031 | |
2032 | |
2033 | class BzrUrlFetchHandler(BaseFetchHandler): |
2034 | """Handler for bazaar branches via generic and lp URLs""" |
2035 | def can_handle(self, source): |
2036 | url_parts = self.parse_url(source) |
2037 | - if url_parts.scheme not in ('bzr+ssh', 'lp'): |
2038 | + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): |
2039 | return False |
2040 | + elif not url_parts.scheme: |
2041 | + return os.path.exists(os.path.join(source, '.bzr')) |
2042 | else: |
2043 | return True |
2044 | |
2045 | def branch(self, source, dest): |
2046 | - url_parts = self.parse_url(source) |
2047 | - # If we use lp:branchname scheme we need to load plugins |
2048 | if not self.can_handle(source): |
2049 | raise UnhandledSource("Cannot handle {}".format(source)) |
2050 | - if url_parts.scheme == "lp": |
2051 | - from bzrlib.plugin import load_plugins |
2052 | - load_plugins() |
2053 | - try: |
2054 | - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) |
2055 | - except errors.AlreadyControlDirError: |
2056 | - local_branch = Branch.open(dest) |
2057 | - try: |
2058 | - remote_branch = Branch.open(source) |
2059 | - remote_branch.push(local_branch) |
2060 | - tree = workingtree.WorkingTree.open(dest) |
2061 | - tree.update() |
2062 | - except Exception as e: |
2063 | - raise e |
2064 | + if os.path.exists(dest): |
2065 | + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) |
2066 | + else: |
2067 | + check_call(['bzr', 'branch', source, dest]) |
2068 | |
2069 | - def install(self, source): |
2070 | + def install(self, source, dest=None): |
2071 | url_parts = self.parse_url(source) |
2072 | branch_name = url_parts.path.strip("/").split("/")[-1] |
2073 | - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
2074 | - branch_name) |
2075 | + if dest: |
2076 | + dest_dir = os.path.join(dest, branch_name) |
2077 | + else: |
2078 | + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
2079 | + branch_name) |
2080 | + |
2081 | if not os.path.exists(dest_dir): |
2082 | mkdir(dest_dir, perms=0o755) |
2083 | try: |
2084 | |
2085 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
2086 | --- hooks/charmhelpers/fetch/giturl.py 2015-07-29 10:48:05 +0000 |
2087 | +++ hooks/charmhelpers/fetch/giturl.py 2016-01-11 18:37:19 +0000 |
2088 | @@ -15,24 +15,18 @@ |
2089 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2090 | |
2091 | import os |
2092 | +from subprocess import check_call |
2093 | from charmhelpers.fetch import ( |
2094 | BaseFetchHandler, |
2095 | - UnhandledSource |
2096 | + UnhandledSource, |
2097 | + filter_installed_packages, |
2098 | + apt_install, |
2099 | ) |
2100 | -from charmhelpers.core.host import mkdir |
2101 | - |
2102 | -import six |
2103 | -if six.PY3: |
2104 | - raise ImportError('GitPython does not support Python 3') |
2105 | - |
2106 | -try: |
2107 | - from git import Repo |
2108 | -except ImportError: |
2109 | - from charmhelpers.fetch import apt_install |
2110 | - apt_install("python-git") |
2111 | - from git import Repo |
2112 | - |
2113 | -from git.exc import GitCommandError # noqa E402 |
2114 | + |
2115 | +if filter_installed_packages(['git']) != []: |
2116 | + apt_install(['git']) |
2117 | + if filter_installed_packages(['git']) != []: |
2118 | + raise NotImplementedError('Unable to install git') |
2119 | |
2120 | |
2121 | class GitUrlFetchHandler(BaseFetchHandler): |
2122 | @@ -40,19 +34,24 @@ |
2123 | def can_handle(self, source): |
2124 | url_parts = self.parse_url(source) |
2125 | # TODO (mattyw) no support for ssh git@ yet |
2126 | - if url_parts.scheme not in ('http', 'https', 'git'): |
2127 | + if url_parts.scheme not in ('http', 'https', 'git', ''): |
2128 | return False |
2129 | + elif not url_parts.scheme: |
2130 | + return os.path.exists(os.path.join(source, '.git')) |
2131 | else: |
2132 | return True |
2133 | |
2134 | - def clone(self, source, dest, branch, depth=None): |
2135 | + def clone(self, source, dest, branch="master", depth=None): |
2136 | if not self.can_handle(source): |
2137 | raise UnhandledSource("Cannot handle {}".format(source)) |
2138 | |
2139 | - if depth: |
2140 | - Repo.clone_from(source, dest, branch=branch, depth=depth) |
2141 | + if os.path.exists(dest): |
2142 | + cmd = ['git', '-C', dest, 'pull', source, branch] |
2143 | else: |
2144 | - Repo.clone_from(source, dest, branch=branch) |
2145 | + cmd = ['git', 'clone', source, dest, '--branch', branch] |
2146 | + if depth: |
2147 | + cmd.extend(['--depth', depth]) |
2148 | + check_call(cmd) |
2149 | |
2150 | def install(self, source, branch="master", dest=None, depth=None): |
2151 | url_parts = self.parse_url(source) |
2152 | @@ -62,12 +61,8 @@ |
2153 | else: |
2154 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
2155 | branch_name) |
2156 | - if not os.path.exists(dest_dir): |
2157 | - mkdir(dest_dir, perms=0o755) |
2158 | try: |
2159 | self.clone(source, dest_dir, branch, depth) |
2160 | - except GitCommandError as e: |
2161 | - raise UnhandledSource(e) |
2162 | except OSError as e: |
2163 | raise UnhandledSource(e.strerror) |
2164 | return dest_dir |
2165 | |
2166 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
2167 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-21 04:38:09 +0000 |
2168 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-11 18:37:19 +0000 |
2169 | @@ -124,7 +124,8 @@ |
2170 | 'ceph-osd', 'ceph-radosgw'] |
2171 | |
2172 | # Charms which can not use openstack-origin, ie. many subordinates |
2173 | - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
2174 | + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
2175 | + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] |
2176 | |
2177 | if self.openstack: |
2178 | for svc in services: |
2179 | @@ -224,7 +225,8 @@ |
2180 | self.precise_havana, self.precise_icehouse, |
2181 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
2182 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
2183 | - self.wily_liberty) = range(12) |
2184 | + self.wily_liberty, self.trusty_mitaka, |
2185 | + self.xenial_mitaka) = range(14) |
2186 | |
2187 | releases = { |
2188 | ('precise', None): self.precise_essex, |
2189 | @@ -236,9 +238,11 @@ |
2190 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
2191 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
2192 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
2193 | + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, |
2194 | ('utopic', None): self.utopic_juno, |
2195 | ('vivid', None): self.vivid_kilo, |
2196 | - ('wily', None): self.wily_liberty} |
2197 | + ('wily', None): self.wily_liberty, |
2198 | + ('xenial', None): self.xenial_mitaka} |
2199 | return releases[(self.series, self.openstack)] |
2200 | |
2201 | def _get_openstack_release_string(self): |
2202 | @@ -255,6 +259,7 @@ |
2203 | ('utopic', 'juno'), |
2204 | ('vivid', 'kilo'), |
2205 | ('wily', 'liberty'), |
2206 | + ('xenial', 'mitaka'), |
2207 | ]) |
2208 | if self.openstack: |
2209 | os_origin = self.openstack.split(':')[1] |
charm_lint_check #17088 rabbitmq-server-next for 1chb1n mp282210
LINT OK: passed
Build: http://10.245.162.77:8080/job/charm_lint_check/17088/