Merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp into lp:~openstack-charmers/charms/trusty/cisco-vpp/next
- Trusty Tahr (14.04)
- dhcp
- Merge into next
Proposed by
Liam Young
Status: | Merged |
---|---|
Merged at revision: | 116 |
Proposed branch: | lp:~gnuoy/charms/trusty/cisco-vpp/dhcp |
Merge into: | lp:~openstack-charmers/charms/trusty/cisco-vpp/next |
Diff against target: |
5609 lines (+3361/-493) 40 files modified
charm-helpers-hooks.yaml (+1/-1) config.yaml (+8/-0) hooks/ODL.py (+2/-1) hooks/charmhelpers/contrib/network/ip.py (+10/-4) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+158/-15) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+742/-51) hooks/charmhelpers/contrib/openstack/context.py (+192/-63) hooks/charmhelpers/contrib/openstack/neutron.py (+57/-16) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+12/-6) hooks/charmhelpers/contrib/openstack/templating.py (+32/-29) hooks/charmhelpers/contrib/openstack/utils.py (+324/-33) hooks/charmhelpers/contrib/python/packages.py (+2/-0) hooks/charmhelpers/contrib/storage/linux/ceph.py (+272/-43) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3) hooks/charmhelpers/core/files.py (+45/-0) hooks/charmhelpers/core/hookenv.py (+249/-49) hooks/charmhelpers/core/host.py (+148/-36) hooks/charmhelpers/core/hugepage.py (+33/-16) hooks/charmhelpers/core/kernel.py (+68/-0) hooks/charmhelpers/core/services/base.py (+12/-9) hooks/charmhelpers/core/services/helpers.py (+9/-7) hooks/charmhelpers/core/strutils.py (+30/-0) hooks/charmhelpers/core/templating.py (+12/-12) hooks/charmhelpers/core/unitdata.py (+61/-17) hooks/charmhelpers/fetch/__init__.py (+31/-14) hooks/charmhelpers/fetch/archiveurl.py (+7/-1) hooks/charmhelpers/fetch/giturl.py (+1/-1) hooks/services.py (+28/-0) hooks/vpp_data.py (+75/-0) hooks/vpp_utils.py (+2/-1) metadata.yaml (+4/-0) templates/icehouse/dhcp_agent.ini (+13/-0) templates/icehouse/metadata_agent.ini (+15/-0) templates/icehouse/neutron.conf (+31/-0) templates/parts/rabbitmq (+21/-0) tests/charmhelpers/contrib/amulet/utils.py (+239/-9) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4) tests/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51) unit_tests/test_vpp_utils.py (+2/-1) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Page | Approve | ||
Review via email:
|
Commit message
Description of the change
This merge proposal adds support for serving dhcp and metadata requests to guests.
To post a comment you must log in.
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
James Page (james-page) : | # |
review:
Needs Information
- 121. By Liam Young
-
Fix typo in context that was returning the wrong IP for keystone
- 122. By Liam Young
-
ODL initially returns 404s when querying nodes so backoff and retry node query
- 123. By Liam Young
-
Fix bug causing charm to fail if /etc/neutron does not exist
- 124. By Liam Young
-
General tidy-up/fixes from merge-proposal feedback from James Page
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
James Page (james-page) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers-hooks.yaml' | |||
2 | --- charm-helpers-hooks.yaml 2015-06-24 09:56:23 +0000 | |||
3 | +++ charm-helpers-hooks.yaml 2015-12-01 15:05:49 +0000 | |||
4 | @@ -1,4 +1,4 @@ | |||
6 | 1 | branch: lp:~gnuoy/charm-helpers/cisco-vpp/ | 1 | branch: lp:charm-helpers |
7 | 2 | destination: hooks/charmhelpers | 2 | destination: hooks/charmhelpers |
8 | 3 | include: | 3 | include: |
9 | 4 | - core | 4 | - core |
10 | 5 | 5 | ||
11 | === modified file 'config.yaml' | |||
12 | --- config.yaml 2015-08-14 07:27:33 +0000 | |||
13 | +++ config.yaml 2015-12-01 15:05:49 +0000 | |||
14 | @@ -41,3 +41,11 @@ | |||
15 | 41 | mac-network-map: | 41 | mac-network-map: |
16 | 42 | default: '' | 42 | default: '' |
17 | 43 | type: string | 43 | type: string |
18 | 44 | rabbit-user: | ||
19 | 45 | default: neutron | ||
20 | 46 | type: string | ||
21 | 47 | description: Username used to access RabbitMQ queue | ||
22 | 48 | rabbit-vhost: | ||
23 | 49 | default: openstack | ||
24 | 50 | type: string | ||
25 | 51 | description: RabbitMQ vhost | ||
26 | 44 | 52 | ||
27 | === modified file 'hooks/ODL.py' | |||
28 | --- hooks/ODL.py 2015-09-14 16:44:47 +0000 | |||
29 | +++ hooks/ODL.py 2015-12-01 15:05:49 +0000 | |||
30 | @@ -73,7 +73,8 @@ | |||
31 | 73 | 73 | ||
32 | 74 | def get_odl_registered_nodes(self): | 74 | def get_odl_registered_nodes(self): |
33 | 75 | log('Querying nodes registered with odl') | 75 | log('Querying nodes registered with odl') |
35 | 76 | odl_req = self.contact_odl('GET', self.node_query_url) | 76 | odl_req = self.contact_odl('GET', self.node_query_url, |
36 | 77 | retry_rcs=[requests.codes.not_found]) | ||
37 | 77 | odl_json = odl_req.json() | 78 | odl_json = odl_req.json() |
38 | 78 | odl_node_ids = [] | 79 | odl_node_ids = [] |
39 | 79 | if odl_json.get('nodes'): | 80 | if odl_json.get('nodes'): |
40 | 80 | 81 | ||
41 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
42 | --- hooks/charmhelpers/contrib/network/ip.py 2015-06-10 15:45:48 +0000 | |||
43 | +++ hooks/charmhelpers/contrib/network/ip.py 2015-12-01 15:05:49 +0000 | |||
44 | @@ -23,7 +23,7 @@ | |||
45 | 23 | from functools import partial | 23 | from functools import partial |
46 | 24 | 24 | ||
47 | 25 | from charmhelpers.core.hookenv import unit_get | 25 | from charmhelpers.core.hookenv import unit_get |
49 | 26 | from charmhelpers.fetch import apt_install | 26 | from charmhelpers.fetch import apt_install, apt_update |
50 | 27 | from charmhelpers.core.hookenv import ( | 27 | from charmhelpers.core.hookenv import ( |
51 | 28 | log, | 28 | log, |
52 | 29 | WARNING, | 29 | WARNING, |
53 | @@ -32,13 +32,15 @@ | |||
54 | 32 | try: | 32 | try: |
55 | 33 | import netifaces | 33 | import netifaces |
56 | 34 | except ImportError: | 34 | except ImportError: |
58 | 35 | apt_install('python-netifaces') | 35 | apt_update(fatal=True) |
59 | 36 | apt_install('python-netifaces', fatal=True) | ||
60 | 36 | import netifaces | 37 | import netifaces |
61 | 37 | 38 | ||
62 | 38 | try: | 39 | try: |
63 | 39 | import netaddr | 40 | import netaddr |
64 | 40 | except ImportError: | 41 | except ImportError: |
66 | 41 | apt_install('python-netaddr') | 42 | apt_update(fatal=True) |
67 | 43 | apt_install('python-netaddr', fatal=True) | ||
68 | 42 | import netaddr | 44 | import netaddr |
69 | 43 | 45 | ||
70 | 44 | 46 | ||
71 | @@ -435,8 +437,12 @@ | |||
72 | 435 | 437 | ||
73 | 436 | rev = dns.reversename.from_address(address) | 438 | rev = dns.reversename.from_address(address) |
74 | 437 | result = ns_query(rev) | 439 | result = ns_query(rev) |
75 | 440 | |||
76 | 438 | if not result: | 441 | if not result: |
78 | 439 | return None | 442 | try: |
79 | 443 | result = socket.gethostbyaddr(address)[0] | ||
80 | 444 | except: | ||
81 | 445 | return None | ||
82 | 440 | else: | 446 | else: |
83 | 441 | result = address | 447 | result = address |
84 | 442 | 448 | ||
85 | 443 | 449 | ||
86 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
87 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-16 07:53:15 +0000 | |||
88 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000 | |||
89 | @@ -14,12 +14,18 @@ | |||
90 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
91 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
92 | 16 | 16 | ||
93 | 17 | import logging | ||
94 | 18 | import re | ||
95 | 19 | import sys | ||
96 | 17 | import six | 20 | import six |
97 | 18 | from collections import OrderedDict | 21 | from collections import OrderedDict |
98 | 19 | from charmhelpers.contrib.amulet.deployment import ( | 22 | from charmhelpers.contrib.amulet.deployment import ( |
99 | 20 | AmuletDeployment | 23 | AmuletDeployment |
100 | 21 | ) | 24 | ) |
101 | 22 | 25 | ||
102 | 26 | DEBUG = logging.DEBUG | ||
103 | 27 | ERROR = logging.ERROR | ||
104 | 28 | |||
105 | 23 | 29 | ||
106 | 24 | class OpenStackAmuletDeployment(AmuletDeployment): | 30 | class OpenStackAmuletDeployment(AmuletDeployment): |
107 | 25 | """OpenStack amulet deployment. | 31 | """OpenStack amulet deployment. |
108 | @@ -28,9 +34,12 @@ | |||
109 | 28 | that is specifically for use by OpenStack charms. | 34 | that is specifically for use by OpenStack charms. |
110 | 29 | """ | 35 | """ |
111 | 30 | 36 | ||
113 | 31 | def __init__(self, series=None, openstack=None, source=None, stable=True): | 37 | def __init__(self, series=None, openstack=None, source=None, |
114 | 38 | stable=True, log_level=DEBUG): | ||
115 | 32 | """Initialize the deployment environment.""" | 39 | """Initialize the deployment environment.""" |
116 | 33 | super(OpenStackAmuletDeployment, self).__init__(series) | 40 | super(OpenStackAmuletDeployment, self).__init__(series) |
117 | 41 | self.log = self.get_logger(level=log_level) | ||
118 | 42 | self.log.info('OpenStackAmuletDeployment: init') | ||
119 | 34 | self.openstack = openstack | 43 | self.openstack = openstack |
120 | 35 | self.source = source | 44 | self.source = source |
121 | 36 | self.stable = stable | 45 | self.stable = stable |
122 | @@ -38,30 +47,55 @@ | |||
123 | 38 | # out. | 47 | # out. |
124 | 39 | self.current_next = "trusty" | 48 | self.current_next = "trusty" |
125 | 40 | 49 | ||
126 | 50 | def get_logger(self, name="deployment-logger", level=logging.DEBUG): | ||
127 | 51 | """Get a logger object that will log to stdout.""" | ||
128 | 52 | log = logging | ||
129 | 53 | logger = log.getLogger(name) | ||
130 | 54 | fmt = log.Formatter("%(asctime)s %(funcName)s " | ||
131 | 55 | "%(levelname)s: %(message)s") | ||
132 | 56 | |||
133 | 57 | handler = log.StreamHandler(stream=sys.stdout) | ||
134 | 58 | handler.setLevel(level) | ||
135 | 59 | handler.setFormatter(fmt) | ||
136 | 60 | |||
137 | 61 | logger.addHandler(handler) | ||
138 | 62 | logger.setLevel(level) | ||
139 | 63 | |||
140 | 64 | return logger | ||
141 | 65 | |||
142 | 41 | def _determine_branch_locations(self, other_services): | 66 | def _determine_branch_locations(self, other_services): |
143 | 42 | """Determine the branch locations for the other services. | 67 | """Determine the branch locations for the other services. |
144 | 43 | 68 | ||
145 | 44 | Determine if the local branch being tested is derived from its | 69 | Determine if the local branch being tested is derived from its |
146 | 45 | stable or next (dev) branch, and based on this, use the corresonding | 70 | stable or next (dev) branch, and based on this, use the corresonding |
147 | 46 | stable or next branches for the other_services.""" | 71 | stable or next branches for the other_services.""" |
149 | 47 | base_charms = ['mysql', 'mongodb'] | 72 | |
150 | 73 | self.log.info('OpenStackAmuletDeployment: determine branch locations') | ||
151 | 74 | |||
152 | 75 | # Charms outside the lp:~openstack-charmers namespace | ||
153 | 76 | base_charms = ['mysql', 'mongodb', 'nrpe'] | ||
154 | 77 | |||
155 | 78 | # Force these charms to current series even when using an older series. | ||
156 | 79 | # ie. Use trusty/nrpe even when series is precise, as the P charm | ||
157 | 80 | # does not possess the necessary external master config and hooks. | ||
158 | 81 | force_series_current = ['nrpe'] | ||
159 | 48 | 82 | ||
160 | 49 | if self.series in ['precise', 'trusty']: | 83 | if self.series in ['precise', 'trusty']: |
161 | 50 | base_series = self.series | 84 | base_series = self.series |
162 | 51 | else: | 85 | else: |
163 | 52 | base_series = self.current_next | 86 | base_series = self.current_next |
164 | 53 | 87 | ||
169 | 54 | if self.stable: | 88 | for svc in other_services: |
170 | 55 | for svc in other_services: | 89 | if svc['name'] in force_series_current: |
171 | 56 | if svc.get('location'): | 90 | base_series = self.current_next |
172 | 57 | continue | 91 | # If a location has been explicitly set, use it |
173 | 92 | if svc.get('location'): | ||
174 | 93 | continue | ||
175 | 94 | if self.stable: | ||
176 | 58 | temp = 'lp:charms/{}/{}' | 95 | temp = 'lp:charms/{}/{}' |
177 | 59 | svc['location'] = temp.format(base_series, | 96 | svc['location'] = temp.format(base_series, |
178 | 60 | svc['name']) | 97 | svc['name']) |
183 | 61 | else: | 98 | else: |
180 | 62 | for svc in other_services: | ||
181 | 63 | if svc.get('location'): | ||
182 | 64 | continue | ||
184 | 65 | if svc['name'] in base_charms: | 99 | if svc['name'] in base_charms: |
185 | 66 | temp = 'lp:charms/{}/{}' | 100 | temp = 'lp:charms/{}/{}' |
186 | 67 | svc['location'] = temp.format(base_series, | 101 | svc['location'] = temp.format(base_series, |
187 | @@ -70,10 +104,13 @@ | |||
188 | 70 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' | 104 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' |
189 | 71 | svc['location'] = temp.format(self.current_next, | 105 | svc['location'] = temp.format(self.current_next, |
190 | 72 | svc['name']) | 106 | svc['name']) |
191 | 107 | |||
192 | 73 | return other_services | 108 | return other_services |
193 | 74 | 109 | ||
194 | 75 | def _add_services(self, this_service, other_services): | 110 | def _add_services(self, this_service, other_services): |
195 | 76 | """Add services to the deployment and set openstack-origin/source.""" | 111 | """Add services to the deployment and set openstack-origin/source.""" |
196 | 112 | self.log.info('OpenStackAmuletDeployment: adding services') | ||
197 | 113 | |||
198 | 77 | other_services = self._determine_branch_locations(other_services) | 114 | other_services = self._determine_branch_locations(other_services) |
199 | 78 | 115 | ||
200 | 79 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | 116 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
201 | @@ -81,29 +118,102 @@ | |||
202 | 81 | 118 | ||
203 | 82 | services = other_services | 119 | services = other_services |
204 | 83 | services.append(this_service) | 120 | services.append(this_service) |
205 | 121 | |||
206 | 122 | # Charms which should use the source config option | ||
207 | 84 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
208 | 85 | 'ceph-osd', 'ceph-radosgw'] | 124 | 'ceph-osd', 'ceph-radosgw'] |
212 | 86 | # Openstack subordinate charms do not expose an origin option as that | 125 | |
213 | 87 | # is controlled by the principle | 126 | # Charms which can not use openstack-origin, ie. many subordinates |
214 | 88 | ignore = ['neutron-openvswitch', 'cisco-vpp'] | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
215 | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] | ||
216 | 89 | 129 | ||
217 | 90 | if self.openstack: | 130 | if self.openstack: |
218 | 91 | for svc in services: | 131 | for svc in services: |
220 | 92 | if svc['name'] not in use_source + ignore: | 132 | if svc['name'] not in use_source + no_origin: |
221 | 93 | config = {'openstack-origin': self.openstack} | 133 | config = {'openstack-origin': self.openstack} |
222 | 94 | self.d.configure(svc['name'], config) | 134 | self.d.configure(svc['name'], config) |
223 | 95 | 135 | ||
224 | 96 | if self.source: | 136 | if self.source: |
225 | 97 | for svc in services: | 137 | for svc in services: |
227 | 98 | if svc['name'] in use_source and svc['name'] not in ignore: | 138 | if svc['name'] in use_source and svc['name'] not in no_origin: |
228 | 99 | config = {'source': self.source} | 139 | config = {'source': self.source} |
229 | 100 | self.d.configure(svc['name'], config) | 140 | self.d.configure(svc['name'], config) |
230 | 101 | 141 | ||
231 | 102 | def _configure_services(self, configs): | 142 | def _configure_services(self, configs): |
232 | 103 | """Configure all of the services.""" | 143 | """Configure all of the services.""" |
233 | 144 | self.log.info('OpenStackAmuletDeployment: configure services') | ||
234 | 104 | for service, config in six.iteritems(configs): | 145 | for service, config in six.iteritems(configs): |
235 | 105 | self.d.configure(service, config) | 146 | self.d.configure(service, config) |
236 | 106 | 147 | ||
237 | 148 | def _auto_wait_for_status(self, message=None, exclude_services=None, | ||
238 | 149 | include_only=None, timeout=1800): | ||
239 | 150 | """Wait for all units to have a specific extended status, except | ||
240 | 151 | for any defined as excluded. Unless specified via message, any | ||
241 | 152 | status containing any case of 'ready' will be considered a match. | ||
242 | 153 | |||
243 | 154 | Examples of message usage: | ||
244 | 155 | |||
245 | 156 | Wait for all unit status to CONTAIN any case of 'ready' or 'ok': | ||
246 | 157 | message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) | ||
247 | 158 | |||
248 | 159 | Wait for all units to reach this status (exact match): | ||
249 | 160 | message = re.compile('^Unit is ready and clustered$') | ||
250 | 161 | |||
251 | 162 | Wait for all units to reach any one of these (exact match): | ||
252 | 163 | message = re.compile('Unit is ready|OK|Ready') | ||
253 | 164 | |||
254 | 165 | Wait for at least one unit to reach this status (exact match): | ||
255 | 166 | message = {'ready'} | ||
256 | 167 | |||
257 | 168 | See Amulet's sentry.wait_for_messages() for message usage detail. | ||
258 | 169 | https://github.com/juju/amulet/blob/master/amulet/sentry.py | ||
259 | 170 | |||
260 | 171 | :param message: Expected status match | ||
261 | 172 | :param exclude_services: List of juju service names to ignore, | ||
262 | 173 | not to be used in conjuction with include_only. | ||
263 | 174 | :param include_only: List of juju service names to exclusively check, | ||
264 | 175 | not to be used in conjuction with exclude_services. | ||
265 | 176 | :param timeout: Maximum time in seconds to wait for status match | ||
266 | 177 | :returns: None. Raises if timeout is hit. | ||
267 | 178 | """ | ||
268 | 179 | self.log.info('Waiting for extended status on units...') | ||
269 | 180 | |||
270 | 181 | all_services = self.d.services.keys() | ||
271 | 182 | |||
272 | 183 | if exclude_services and include_only: | ||
273 | 184 | raise ValueError('exclude_services can not be used ' | ||
274 | 185 | 'with include_only') | ||
275 | 186 | |||
276 | 187 | if message: | ||
277 | 188 | if isinstance(message, re._pattern_type): | ||
278 | 189 | match = message.pattern | ||
279 | 190 | else: | ||
280 | 191 | match = message | ||
281 | 192 | |||
282 | 193 | self.log.debug('Custom extended status wait match: ' | ||
283 | 194 | '{}'.format(match)) | ||
284 | 195 | else: | ||
285 | 196 | self.log.debug('Default extended status wait match: contains ' | ||
286 | 197 | 'READY (case-insensitive)') | ||
287 | 198 | message = re.compile('.*ready.*', re.IGNORECASE) | ||
288 | 199 | |||
289 | 200 | if exclude_services: | ||
290 | 201 | self.log.debug('Excluding services from extended status match: ' | ||
291 | 202 | '{}'.format(exclude_services)) | ||
292 | 203 | else: | ||
293 | 204 | exclude_services = [] | ||
294 | 205 | |||
295 | 206 | if include_only: | ||
296 | 207 | services = include_only | ||
297 | 208 | else: | ||
298 | 209 | services = list(set(all_services) - set(exclude_services)) | ||
299 | 210 | |||
300 | 211 | self.log.debug('Waiting up to {}s for extended status on services: ' | ||
301 | 212 | '{}'.format(timeout, services)) | ||
302 | 213 | service_messages = {service: message for service in services} | ||
303 | 214 | self.d.sentry.wait_for_messages(service_messages, timeout=timeout) | ||
304 | 215 | self.log.info('OK') | ||
305 | 216 | |||
306 | 107 | def _get_openstack_release(self): | 217 | def _get_openstack_release(self): |
307 | 108 | """Get openstack release. | 218 | """Get openstack release. |
308 | 109 | 219 | ||
309 | @@ -152,3 +262,36 @@ | |||
310 | 152 | return os_origin.split('%s-' % self.series)[1].split('/')[0] | 262 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
311 | 153 | else: | 263 | else: |
312 | 154 | return releases[self.series] | 264 | return releases[self.series] |
313 | 265 | |||
314 | 266 | def get_ceph_expected_pools(self, radosgw=False): | ||
315 | 267 | """Return a list of expected ceph pools in a ceph + cinder + glance | ||
316 | 268 | test scenario, based on OpenStack release and whether ceph radosgw | ||
317 | 269 | is flagged as present or not.""" | ||
318 | 270 | |||
319 | 271 | if self._get_openstack_release() >= self.trusty_kilo: | ||
320 | 272 | # Kilo or later | ||
321 | 273 | pools = [ | ||
322 | 274 | 'rbd', | ||
323 | 275 | 'cinder', | ||
324 | 276 | 'glance' | ||
325 | 277 | ] | ||
326 | 278 | else: | ||
327 | 279 | # Juno or earlier | ||
328 | 280 | pools = [ | ||
329 | 281 | 'data', | ||
330 | 282 | 'metadata', | ||
331 | 283 | 'rbd', | ||
332 | 284 | 'cinder', | ||
333 | 285 | 'glance' | ||
334 | 286 | ] | ||
335 | 287 | |||
336 | 288 | if radosgw: | ||
337 | 289 | pools.extend([ | ||
338 | 290 | '.rgw.root', | ||
339 | 291 | '.rgw.control', | ||
340 | 292 | '.rgw', | ||
341 | 293 | '.rgw.gc', | ||
342 | 294 | '.users.uid' | ||
343 | 295 | ]) | ||
344 | 296 | |||
345 | 297 | return pools | ||
346 | 155 | 298 | ||
347 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
348 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-10 07:35:12 +0000 | |||
349 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-12-01 15:05:49 +0000 | |||
350 | @@ -14,16 +14,22 @@ | |||
351 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
352 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
353 | 16 | 16 | ||
354 | 17 | import amulet | ||
355 | 18 | import json | ||
356 | 17 | import logging | 19 | import logging |
357 | 18 | import os | 20 | import os |
358 | 21 | import re | ||
359 | 22 | import six | ||
360 | 19 | import time | 23 | import time |
361 | 20 | import urllib | 24 | import urllib |
362 | 21 | 25 | ||
363 | 26 | import cinderclient.v1.client as cinder_client | ||
364 | 22 | import glanceclient.v1.client as glance_client | 27 | import glanceclient.v1.client as glance_client |
365 | 28 | import heatclient.v1.client as heat_client | ||
366 | 23 | import keystoneclient.v2_0 as keystone_client | 29 | import keystoneclient.v2_0 as keystone_client |
367 | 24 | import novaclient.v1_1.client as nova_client | 30 | import novaclient.v1_1.client as nova_client |
370 | 25 | 31 | import pika | |
371 | 26 | import six | 32 | import swiftclient |
372 | 27 | 33 | ||
373 | 28 | from charmhelpers.contrib.amulet.utils import ( | 34 | from charmhelpers.contrib.amulet.utils import ( |
374 | 29 | AmuletUtils | 35 | AmuletUtils |
375 | @@ -37,7 +43,7 @@ | |||
376 | 37 | """OpenStack amulet utilities. | 43 | """OpenStack amulet utilities. |
377 | 38 | 44 | ||
378 | 39 | This class inherits from AmuletUtils and has additional support | 45 | This class inherits from AmuletUtils and has additional support |
380 | 40 | that is specifically for use by OpenStack charms. | 46 | that is specifically for use by OpenStack charm tests. |
381 | 41 | """ | 47 | """ |
382 | 42 | 48 | ||
383 | 43 | def __init__(self, log_level=ERROR): | 49 | def __init__(self, log_level=ERROR): |
384 | @@ -51,6 +57,8 @@ | |||
385 | 51 | Validate actual endpoint data vs expected endpoint data. The ports | 57 | Validate actual endpoint data vs expected endpoint data. The ports |
386 | 52 | are used to find the matching endpoint. | 58 | are used to find the matching endpoint. |
387 | 53 | """ | 59 | """ |
388 | 60 | self.log.debug('Validating endpoint data...') | ||
389 | 61 | self.log.debug('actual: {}'.format(repr(endpoints))) | ||
390 | 54 | found = False | 62 | found = False |
391 | 55 | for ep in endpoints: | 63 | for ep in endpoints: |
392 | 56 | self.log.debug('endpoint: {}'.format(repr(ep))) | 64 | self.log.debug('endpoint: {}'.format(repr(ep))) |
393 | @@ -77,6 +85,7 @@ | |||
394 | 77 | Validate a list of actual service catalog endpoints vs a list of | 85 | Validate a list of actual service catalog endpoints vs a list of |
395 | 78 | expected service catalog endpoints. | 86 | expected service catalog endpoints. |
396 | 79 | """ | 87 | """ |
397 | 88 | self.log.debug('Validating service catalog endpoint data...') | ||
398 | 80 | self.log.debug('actual: {}'.format(repr(actual))) | 89 | self.log.debug('actual: {}'.format(repr(actual))) |
399 | 81 | for k, v in six.iteritems(expected): | 90 | for k, v in six.iteritems(expected): |
400 | 82 | if k in actual: | 91 | if k in actual: |
401 | @@ -93,6 +102,7 @@ | |||
402 | 93 | Validate a list of actual tenant data vs list of expected tenant | 102 | Validate a list of actual tenant data vs list of expected tenant |
403 | 94 | data. | 103 | data. |
404 | 95 | """ | 104 | """ |
405 | 105 | self.log.debug('Validating tenant data...') | ||
406 | 96 | self.log.debug('actual: {}'.format(repr(actual))) | 106 | self.log.debug('actual: {}'.format(repr(actual))) |
407 | 97 | for e in expected: | 107 | for e in expected: |
408 | 98 | found = False | 108 | found = False |
409 | @@ -114,6 +124,7 @@ | |||
410 | 114 | Validate a list of actual role data vs a list of expected role | 124 | Validate a list of actual role data vs a list of expected role |
411 | 115 | data. | 125 | data. |
412 | 116 | """ | 126 | """ |
413 | 127 | self.log.debug('Validating role data...') | ||
414 | 117 | self.log.debug('actual: {}'.format(repr(actual))) | 128 | self.log.debug('actual: {}'.format(repr(actual))) |
415 | 118 | for e in expected: | 129 | for e in expected: |
416 | 119 | found = False | 130 | found = False |
417 | @@ -134,6 +145,7 @@ | |||
418 | 134 | Validate a list of actual user data vs a list of expected user | 145 | Validate a list of actual user data vs a list of expected user |
419 | 135 | data. | 146 | data. |
420 | 136 | """ | 147 | """ |
421 | 148 | self.log.debug('Validating user data...') | ||
422 | 137 | self.log.debug('actual: {}'.format(repr(actual))) | 149 | self.log.debug('actual: {}'.format(repr(actual))) |
423 | 138 | for e in expected: | 150 | for e in expected: |
424 | 139 | found = False | 151 | found = False |
425 | @@ -155,17 +167,30 @@ | |||
426 | 155 | 167 | ||
427 | 156 | Validate a list of actual flavors vs a list of expected flavors. | 168 | Validate a list of actual flavors vs a list of expected flavors. |
428 | 157 | """ | 169 | """ |
429 | 170 | self.log.debug('Validating flavor data...') | ||
430 | 158 | self.log.debug('actual: {}'.format(repr(actual))) | 171 | self.log.debug('actual: {}'.format(repr(actual))) |
431 | 159 | act = [a.name for a in actual] | 172 | act = [a.name for a in actual] |
432 | 160 | return self._validate_list_data(expected, act) | 173 | return self._validate_list_data(expected, act) |
433 | 161 | 174 | ||
434 | 162 | def tenant_exists(self, keystone, tenant): | 175 | def tenant_exists(self, keystone, tenant): |
435 | 163 | """Return True if tenant exists.""" | 176 | """Return True if tenant exists.""" |
436 | 177 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) | ||
437 | 164 | return tenant in [t.name for t in keystone.tenants.list()] | 178 | return tenant in [t.name for t in keystone.tenants.list()] |
438 | 165 | 179 | ||
439 | 180 | def authenticate_cinder_admin(self, keystone_sentry, username, | ||
440 | 181 | password, tenant): | ||
441 | 182 | """Authenticates admin user with cinder.""" | ||
442 | 183 | # NOTE(beisner): cinder python client doesn't accept tokens. | ||
443 | 184 | service_ip = \ | ||
444 | 185 | keystone_sentry.relation('shared-db', | ||
445 | 186 | 'mysql:shared-db')['private-address'] | ||
446 | 187 | ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) | ||
447 | 188 | return cinder_client.Client(username, password, tenant, ept) | ||
448 | 189 | |||
449 | 166 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 190 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
450 | 167 | tenant): | 191 | tenant): |
451 | 168 | """Authenticates admin user with the keystone admin endpoint.""" | 192 | """Authenticates admin user with the keystone admin endpoint.""" |
452 | 193 | self.log.debug('Authenticating keystone admin...') | ||
453 | 169 | unit = keystone_sentry | 194 | unit = keystone_sentry |
454 | 170 | service_ip = unit.relation('shared-db', | 195 | service_ip = unit.relation('shared-db', |
455 | 171 | 'mysql:shared-db')['private-address'] | 196 | 'mysql:shared-db')['private-address'] |
456 | @@ -175,6 +200,7 @@ | |||
457 | 175 | 200 | ||
458 | 176 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 201 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
459 | 177 | """Authenticates a regular user with the keystone public endpoint.""" | 202 | """Authenticates a regular user with the keystone public endpoint.""" |
460 | 203 | self.log.debug('Authenticating keystone user ({})...'.format(user)) | ||
461 | 178 | ep = keystone.service_catalog.url_for(service_type='identity', | 204 | ep = keystone.service_catalog.url_for(service_type='identity', |
462 | 179 | endpoint_type='publicURL') | 205 | endpoint_type='publicURL') |
463 | 180 | return keystone_client.Client(username=user, password=password, | 206 | return keystone_client.Client(username=user, password=password, |
464 | @@ -182,19 +208,49 @@ | |||
465 | 182 | 208 | ||
466 | 183 | def authenticate_glance_admin(self, keystone): | 209 | def authenticate_glance_admin(self, keystone): |
467 | 184 | """Authenticates admin user with glance.""" | 210 | """Authenticates admin user with glance.""" |
468 | 211 | self.log.debug('Authenticating glance admin...') | ||
469 | 185 | ep = keystone.service_catalog.url_for(service_type='image', | 212 | ep = keystone.service_catalog.url_for(service_type='image', |
470 | 186 | endpoint_type='adminURL') | 213 | endpoint_type='adminURL') |
471 | 187 | return glance_client.Client(ep, token=keystone.auth_token) | 214 | return glance_client.Client(ep, token=keystone.auth_token) |
472 | 188 | 215 | ||
473 | 216 | def authenticate_heat_admin(self, keystone): | ||
474 | 217 | """Authenticates the admin user with heat.""" | ||
475 | 218 | self.log.debug('Authenticating heat admin...') | ||
476 | 219 | ep = keystone.service_catalog.url_for(service_type='orchestration', | ||
477 | 220 | endpoint_type='publicURL') | ||
478 | 221 | return heat_client.Client(endpoint=ep, token=keystone.auth_token) | ||
479 | 222 | |||
480 | 189 | def authenticate_nova_user(self, keystone, user, password, tenant): | 223 | def authenticate_nova_user(self, keystone, user, password, tenant): |
481 | 190 | """Authenticates a regular user with nova-api.""" | 224 | """Authenticates a regular user with nova-api.""" |
482 | 225 | self.log.debug('Authenticating nova user ({})...'.format(user)) | ||
483 | 191 | ep = keystone.service_catalog.url_for(service_type='identity', | 226 | ep = keystone.service_catalog.url_for(service_type='identity', |
484 | 192 | endpoint_type='publicURL') | 227 | endpoint_type='publicURL') |
485 | 193 | return nova_client.Client(username=user, api_key=password, | 228 | return nova_client.Client(username=user, api_key=password, |
486 | 194 | project_id=tenant, auth_url=ep) | 229 | project_id=tenant, auth_url=ep) |
487 | 195 | 230 | ||
488 | 231 | def authenticate_swift_user(self, keystone, user, password, tenant): | ||
489 | 232 | """Authenticates a regular user with swift api.""" | ||
490 | 233 | self.log.debug('Authenticating swift user ({})...'.format(user)) | ||
491 | 234 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
492 | 235 | endpoint_type='publicURL') | ||
493 | 236 | return swiftclient.Connection(authurl=ep, | ||
494 | 237 | user=user, | ||
495 | 238 | key=password, | ||
496 | 239 | tenant_name=tenant, | ||
497 | 240 | auth_version='2.0') | ||
498 | 241 | |||
499 | 196 | def create_cirros_image(self, glance, image_name): | 242 | def create_cirros_image(self, glance, image_name): |
501 | 197 | """Download the latest cirros image and upload it to glance.""" | 243 | """Download the latest cirros image and upload it to glance, |
502 | 244 | validate and return a resource pointer. | ||
503 | 245 | |||
504 | 246 | :param glance: pointer to authenticated glance connection | ||
505 | 247 | :param image_name: display name for new image | ||
506 | 248 | :returns: glance image pointer | ||
507 | 249 | """ | ||
508 | 250 | self.log.debug('Creating glance cirros image ' | ||
509 | 251 | '({})...'.format(image_name)) | ||
510 | 252 | |||
511 | 253 | # Download cirros image | ||
512 | 198 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | 254 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
513 | 199 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | 255 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
514 | 200 | if http_proxy: | 256 | if http_proxy: |
515 | @@ -203,57 +259,67 @@ | |||
516 | 203 | else: | 259 | else: |
517 | 204 | opener = urllib.FancyURLopener() | 260 | opener = urllib.FancyURLopener() |
518 | 205 | 261 | ||
520 | 206 | f = opener.open("http://download.cirros-cloud.net/version/released") | 262 | f = opener.open('http://download.cirros-cloud.net/version/released') |
521 | 207 | version = f.read().strip() | 263 | version = f.read().strip() |
523 | 208 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) | 264 | cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
524 | 209 | local_path = os.path.join('tests', cirros_img) | 265 | local_path = os.path.join('tests', cirros_img) |
525 | 210 | 266 | ||
526 | 211 | if not os.path.exists(local_path): | 267 | if not os.path.exists(local_path): |
528 | 212 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | 268 | cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
529 | 213 | version, cirros_img) | 269 | version, cirros_img) |
530 | 214 | opener.retrieve(cirros_url, local_path) | 270 | opener.retrieve(cirros_url, local_path) |
531 | 215 | f.close() | 271 | f.close() |
532 | 216 | 272 | ||
533 | 273 | # Create glance image | ||
534 | 217 | with open(local_path) as f: | 274 | with open(local_path) as f: |
535 | 218 | image = glance.images.create(name=image_name, is_public=True, | 275 | image = glance.images.create(name=image_name, is_public=True, |
536 | 219 | disk_format='qcow2', | 276 | disk_format='qcow2', |
537 | 220 | container_format='bare', data=f) | 277 | container_format='bare', data=f) |
550 | 221 | count = 1 | 278 | |
551 | 222 | status = image.status | 279 | # Wait for image to reach active status |
552 | 223 | while status != 'active' and count < 10: | 280 | img_id = image.id |
553 | 224 | time.sleep(3) | 281 | ret = self.resource_reaches_status(glance.images, img_id, |
554 | 225 | image = glance.images.get(image.id) | 282 | expected_stat='active', |
555 | 226 | status = image.status | 283 | msg='Image status wait') |
556 | 227 | self.log.debug('image status: {}'.format(status)) | 284 | if not ret: |
557 | 228 | count += 1 | 285 | msg = 'Glance image failed to reach expected state.' |
558 | 229 | 286 | amulet.raise_status(amulet.FAIL, msg=msg) | |
559 | 230 | if status != 'active': | 287 | |
560 | 231 | self.log.error('image creation timed out') | 288 | # Re-validate new image |
561 | 232 | return None | 289 | self.log.debug('Validating image attributes...') |
562 | 290 | val_img_name = glance.images.get(img_id).name | ||
563 | 291 | val_img_stat = glance.images.get(img_id).status | ||
564 | 292 | val_img_pub = glance.images.get(img_id).is_public | ||
565 | 293 | val_img_cfmt = glance.images.get(img_id).container_format | ||
566 | 294 | val_img_dfmt = glance.images.get(img_id).disk_format | ||
567 | 295 | msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' | ||
568 | 296 | 'container fmt:{} disk fmt:{}'.format( | ||
569 | 297 | val_img_name, val_img_pub, img_id, | ||
570 | 298 | val_img_stat, val_img_cfmt, val_img_dfmt)) | ||
571 | 299 | |||
572 | 300 | if val_img_name == image_name and val_img_stat == 'active' \ | ||
573 | 301 | and val_img_pub is True and val_img_cfmt == 'bare' \ | ||
574 | 302 | and val_img_dfmt == 'qcow2': | ||
575 | 303 | self.log.debug(msg_attr) | ||
576 | 304 | else: | ||
577 | 305 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
578 | 306 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
579 | 233 | 307 | ||
580 | 234 | return image | 308 | return image |
581 | 235 | 309 | ||
582 | 236 | def delete_image(self, glance, image): | 310 | def delete_image(self, glance, image): |
583 | 237 | """Delete the specified image.""" | 311 | """Delete the specified image.""" |
600 | 238 | num_before = len(list(glance.images.list())) | 312 | |
601 | 239 | glance.images.delete(image) | 313 | # /!\ DEPRECATION WARNING |
602 | 240 | 314 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
603 | 241 | count = 1 | 315 | 'delete_resource instead of delete_image.') |
604 | 242 | num_after = len(list(glance.images.list())) | 316 | self.log.debug('Deleting glance image ({})...'.format(image)) |
605 | 243 | while num_after != (num_before - 1) and count < 10: | 317 | return self.delete_resource(glance.images, image, msg='glance image') |
590 | 244 | time.sleep(3) | ||
591 | 245 | num_after = len(list(glance.images.list())) | ||
592 | 246 | self.log.debug('number of images: {}'.format(num_after)) | ||
593 | 247 | count += 1 | ||
594 | 248 | |||
595 | 249 | if num_after != (num_before - 1): | ||
596 | 250 | self.log.error('image deletion timed out') | ||
597 | 251 | return False | ||
598 | 252 | |||
599 | 253 | return True | ||
606 | 254 | 318 | ||
607 | 255 | def create_instance(self, nova, image_name, instance_name, flavor): | 319 | def create_instance(self, nova, image_name, instance_name, flavor): |
608 | 256 | """Create the specified instance.""" | 320 | """Create the specified instance.""" |
609 | 321 | self.log.debug('Creating instance ' | ||
610 | 322 | '({}|{}|{})'.format(instance_name, image_name, flavor)) | ||
611 | 257 | image = nova.images.find(name=image_name) | 323 | image = nova.images.find(name=image_name) |
612 | 258 | flavor = nova.flavors.find(name=flavor) | 324 | flavor = nova.flavors.find(name=flavor) |
613 | 259 | instance = nova.servers.create(name=instance_name, image=image, | 325 | instance = nova.servers.create(name=instance_name, image=image, |
614 | @@ -276,19 +342,644 @@ | |||
615 | 276 | 342 | ||
616 | 277 | def delete_instance(self, nova, instance): | 343 | def delete_instance(self, nova, instance): |
617 | 278 | """Delete the specified instance.""" | 344 | """Delete the specified instance.""" |
634 | 279 | num_before = len(list(nova.servers.list())) | 345 | |
635 | 280 | nova.servers.delete(instance) | 346 | # /!\ DEPRECATION WARNING |
636 | 281 | 347 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
637 | 282 | count = 1 | 348 | 'delete_resource instead of delete_instance.') |
638 | 283 | num_after = len(list(nova.servers.list())) | 349 | self.log.debug('Deleting instance ({})...'.format(instance)) |
639 | 284 | while num_after != (num_before - 1) and count < 10: | 350 | return self.delete_resource(nova.servers, instance, |
640 | 285 | time.sleep(3) | 351 | msg='nova instance') |
641 | 286 | num_after = len(list(nova.servers.list())) | 352 | |
642 | 287 | self.log.debug('number of instances: {}'.format(num_after)) | 353 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
643 | 288 | count += 1 | 354 | """Create a new keypair, or return pointer if it already exists.""" |
644 | 289 | 355 | try: | |
645 | 290 | if num_after != (num_before - 1): | 356 | _keypair = nova.keypairs.get(keypair_name) |
646 | 291 | self.log.error('instance deletion timed out') | 357 | self.log.debug('Keypair ({}) already exists, ' |
647 | 292 | return False | 358 | 'using it.'.format(keypair_name)) |
648 | 293 | 359 | return _keypair | |
649 | 294 | return True | 360 | except: |
650 | 361 | self.log.debug('Keypair ({}) does not exist, ' | ||
651 | 362 | 'creating it.'.format(keypair_name)) | ||
652 | 363 | |||
653 | 364 | _keypair = nova.keypairs.create(name=keypair_name) | ||
654 | 365 | return _keypair | ||
655 | 366 | |||
656 | 367 | def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, | ||
657 | 368 | img_id=None, src_vol_id=None, snap_id=None): | ||
658 | 369 | """Create cinder volume, optionally from a glance image, OR | ||
659 | 370 | optionally as a clone of an existing volume, OR optionally | ||
660 | 371 | from a snapshot. Wait for the new volume status to reach | ||
661 | 372 | the expected status, validate and return a resource pointer. | ||
662 | 373 | |||
663 | 374 | :param vol_name: cinder volume display name | ||
664 | 375 | :param vol_size: size in gigabytes | ||
665 | 376 | :param img_id: optional glance image id | ||
666 | 377 | :param src_vol_id: optional source volume id to clone | ||
667 | 378 | :param snap_id: optional snapshot id to use | ||
668 | 379 | :returns: cinder volume pointer | ||
669 | 380 | """ | ||
670 | 381 | # Handle parameter input and avoid impossible combinations | ||
671 | 382 | if img_id and not src_vol_id and not snap_id: | ||
672 | 383 | # Create volume from image | ||
673 | 384 | self.log.debug('Creating cinder volume from glance image...') | ||
674 | 385 | bootable = 'true' | ||
675 | 386 | elif src_vol_id and not img_id and not snap_id: | ||
676 | 387 | # Clone an existing volume | ||
677 | 388 | self.log.debug('Cloning cinder volume...') | ||
678 | 389 | bootable = cinder.volumes.get(src_vol_id).bootable | ||
679 | 390 | elif snap_id and not src_vol_id and not img_id: | ||
680 | 391 | # Create volume from snapshot | ||
681 | 392 | self.log.debug('Creating cinder volume from snapshot...') | ||
682 | 393 | snap = cinder.volume_snapshots.find(id=snap_id) | ||
683 | 394 | vol_size = snap.size | ||
684 | 395 | snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id | ||
685 | 396 | bootable = cinder.volumes.get(snap_vol_id).bootable | ||
686 | 397 | elif not img_id and not src_vol_id and not snap_id: | ||
687 | 398 | # Create volume | ||
688 | 399 | self.log.debug('Creating cinder volume...') | ||
689 | 400 | bootable = 'false' | ||
690 | 401 | else: | ||
691 | 402 | # Impossible combination of parameters | ||
692 | 403 | msg = ('Invalid method use - name:{} size:{} img_id:{} ' | ||
693 | 404 | 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, | ||
694 | 405 | img_id, src_vol_id, | ||
695 | 406 | snap_id)) | ||
696 | 407 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
697 | 408 | |||
698 | 409 | # Create new volume | ||
699 | 410 | try: | ||
700 | 411 | vol_new = cinder.volumes.create(display_name=vol_name, | ||
701 | 412 | imageRef=img_id, | ||
702 | 413 | size=vol_size, | ||
703 | 414 | source_volid=src_vol_id, | ||
704 | 415 | snapshot_id=snap_id) | ||
705 | 416 | vol_id = vol_new.id | ||
706 | 417 | except Exception as e: | ||
707 | 418 | msg = 'Failed to create volume: {}'.format(e) | ||
708 | 419 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
709 | 420 | |||
710 | 421 | # Wait for volume to reach available status | ||
711 | 422 | ret = self.resource_reaches_status(cinder.volumes, vol_id, | ||
712 | 423 | expected_stat="available", | ||
713 | 424 | msg="Volume status wait") | ||
714 | 425 | if not ret: | ||
715 | 426 | msg = 'Cinder volume failed to reach expected state.' | ||
716 | 427 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
717 | 428 | |||
718 | 429 | # Re-validate new volume | ||
719 | 430 | self.log.debug('Validating volume attributes...') | ||
720 | 431 | val_vol_name = cinder.volumes.get(vol_id).display_name | ||
721 | 432 | val_vol_boot = cinder.volumes.get(vol_id).bootable | ||
722 | 433 | val_vol_stat = cinder.volumes.get(vol_id).status | ||
723 | 434 | val_vol_size = cinder.volumes.get(vol_id).size | ||
724 | 435 | msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' | ||
725 | 436 | '{} size:{}'.format(val_vol_name, vol_id, | ||
726 | 437 | val_vol_stat, val_vol_boot, | ||
727 | 438 | val_vol_size)) | ||
728 | 439 | |||
729 | 440 | if val_vol_boot == bootable and val_vol_stat == 'available' \ | ||
730 | 441 | and val_vol_name == vol_name and val_vol_size == vol_size: | ||
731 | 442 | self.log.debug(msg_attr) | ||
732 | 443 | else: | ||
733 | 444 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
734 | 445 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
735 | 446 | |||
736 | 447 | return vol_new | ||
737 | 448 | |||
738 | 449 | def delete_resource(self, resource, resource_id, | ||
739 | 450 | msg="resource", max_wait=120): | ||
740 | 451 | """Delete one openstack resource, such as one instance, keypair, | ||
741 | 452 | image, volume, stack, etc., and confirm deletion within max wait time. | ||
742 | 453 | |||
743 | 454 | :param resource: pointer to os resource type, ex:glance_client.images | ||
744 | 455 | :param resource_id: unique name or id for the openstack resource | ||
745 | 456 | :param msg: text to identify purpose in logging | ||
746 | 457 | :param max_wait: maximum wait time in seconds | ||
747 | 458 | :returns: True if successful, otherwise False | ||
748 | 459 | """ | ||
749 | 460 | self.log.debug('Deleting OpenStack resource ' | ||
750 | 461 | '{} ({})'.format(resource_id, msg)) | ||
751 | 462 | num_before = len(list(resource.list())) | ||
752 | 463 | resource.delete(resource_id) | ||
753 | 464 | |||
754 | 465 | tries = 0 | ||
755 | 466 | num_after = len(list(resource.list())) | ||
756 | 467 | while num_after != (num_before - 1) and tries < (max_wait / 4): | ||
757 | 468 | self.log.debug('{} delete check: ' | ||
758 | 469 | '{} [{}:{}] {}'.format(msg, tries, | ||
759 | 470 | num_before, | ||
760 | 471 | num_after, | ||
761 | 472 | resource_id)) | ||
762 | 473 | time.sleep(4) | ||
763 | 474 | num_after = len(list(resource.list())) | ||
764 | 475 | tries += 1 | ||
765 | 476 | |||
766 | 477 | self.log.debug('{}: expected, actual count = {}, ' | ||
767 | 478 | '{}'.format(msg, num_before - 1, num_after)) | ||
768 | 479 | |||
769 | 480 | if num_after == (num_before - 1): | ||
770 | 481 | return True | ||
771 | 482 | else: | ||
772 | 483 | self.log.error('{} delete timed out'.format(msg)) | ||
773 | 484 | return False | ||
774 | 485 | |||
775 | 486 | def resource_reaches_status(self, resource, resource_id, | ||
776 | 487 | expected_stat='available', | ||
777 | 488 | msg='resource', max_wait=120): | ||
778 | 489 | """Wait for an openstack resources status to reach an | ||
779 | 490 | expected status within a specified time. Useful to confirm that | ||
780 | 491 | nova instances, cinder vols, snapshots, glance images, heat stacks | ||
781 | 492 | and other resources eventually reach the expected status. | ||
782 | 493 | |||
783 | 494 | :param resource: pointer to os resource type, ex: heat_client.stacks | ||
784 | 495 | :param resource_id: unique id for the openstack resource | ||
785 | 496 | :param expected_stat: status to expect resource to reach | ||
786 | 497 | :param msg: text to identify purpose in logging | ||
787 | 498 | :param max_wait: maximum wait time in seconds | ||
788 | 499 | :returns: True if successful, False if status is not reached | ||
789 | 500 | """ | ||
790 | 501 | |||
791 | 502 | tries = 0 | ||
792 | 503 | resource_stat = resource.get(resource_id).status | ||
793 | 504 | while resource_stat != expected_stat and tries < (max_wait / 4): | ||
794 | 505 | self.log.debug('{} status check: ' | ||
795 | 506 | '{} [{}:{}] {}'.format(msg, tries, | ||
796 | 507 | resource_stat, | ||
797 | 508 | expected_stat, | ||
798 | 509 | resource_id)) | ||
799 | 510 | time.sleep(4) | ||
800 | 511 | resource_stat = resource.get(resource_id).status | ||
801 | 512 | tries += 1 | ||
802 | 513 | |||
803 | 514 | self.log.debug('{}: expected, actual status = {}, ' | ||
804 | 515 | '{}'.format(msg, resource_stat, expected_stat)) | ||
805 | 516 | |||
806 | 517 | if resource_stat == expected_stat: | ||
807 | 518 | return True | ||
808 | 519 | else: | ||
809 | 520 | self.log.debug('{} never reached expected status: ' | ||
810 | 521 | '{}'.format(resource_id, expected_stat)) | ||
811 | 522 | return False | ||
812 | 523 | |||
813 | 524 | def get_ceph_osd_id_cmd(self, index): | ||
814 | 525 | """Produce a shell command that will return a ceph-osd id.""" | ||
815 | 526 | return ("`initctl list | grep 'ceph-osd ' | " | ||
816 | 527 | "awk 'NR=={} {{ print $2 }}' | " | ||
817 | 528 | "grep -o '[0-9]*'`".format(index + 1)) | ||
818 | 529 | |||
819 | 530 | def get_ceph_pools(self, sentry_unit): | ||
820 | 531 | """Return a dict of ceph pools from a single ceph unit, with | ||
821 | 532 | pool name as keys, pool id as vals.""" | ||
822 | 533 | pools = {} | ||
823 | 534 | cmd = 'sudo ceph osd lspools' | ||
824 | 535 | output, code = sentry_unit.run(cmd) | ||
825 | 536 | if code != 0: | ||
826 | 537 | msg = ('{} `{}` returned {} ' | ||
827 | 538 | '{}'.format(sentry_unit.info['unit_name'], | ||
828 | 539 | cmd, code, output)) | ||
829 | 540 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
830 | 541 | |||
831 | 542 | # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, | ||
832 | 543 | for pool in str(output).split(','): | ||
833 | 544 | pool_id_name = pool.split(' ') | ||
834 | 545 | if len(pool_id_name) == 2: | ||
835 | 546 | pool_id = pool_id_name[0] | ||
836 | 547 | pool_name = pool_id_name[1] | ||
837 | 548 | pools[pool_name] = int(pool_id) | ||
838 | 549 | |||
839 | 550 | self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], | ||
840 | 551 | pools)) | ||
841 | 552 | return pools | ||
842 | 553 | |||
843 | 554 | def get_ceph_df(self, sentry_unit): | ||
844 | 555 | """Return dict of ceph df json output, including ceph pool state. | ||
845 | 556 | |||
846 | 557 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
847 | 558 | :returns: Dict of ceph df output | ||
848 | 559 | """ | ||
849 | 560 | cmd = 'sudo ceph df --format=json' | ||
850 | 561 | output, code = sentry_unit.run(cmd) | ||
851 | 562 | if code != 0: | ||
852 | 563 | msg = ('{} `{}` returned {} ' | ||
853 | 564 | '{}'.format(sentry_unit.info['unit_name'], | ||
854 | 565 | cmd, code, output)) | ||
855 | 566 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
856 | 567 | return json.loads(output) | ||
857 | 568 | |||
858 | 569 | def get_ceph_pool_sample(self, sentry_unit, pool_id=0): | ||
859 | 570 | """Take a sample of attributes of a ceph pool, returning ceph | ||
860 | 571 | pool name, object count and disk space used for the specified | ||
861 | 572 | pool ID number. | ||
862 | 573 | |||
863 | 574 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
864 | 575 | :param pool_id: Ceph pool ID | ||
865 | 576 | :returns: List of pool name, object count, kb disk space used | ||
866 | 577 | """ | ||
867 | 578 | df = self.get_ceph_df(sentry_unit) | ||
868 | 579 | pool_name = df['pools'][pool_id]['name'] | ||
869 | 580 | obj_count = df['pools'][pool_id]['stats']['objects'] | ||
870 | 581 | kb_used = df['pools'][pool_id]['stats']['kb_used'] | ||
871 | 582 | self.log.debug('Ceph {} pool (ID {}): {} objects, ' | ||
872 | 583 | '{} kb used'.format(pool_name, pool_id, | ||
873 | 584 | obj_count, kb_used)) | ||
874 | 585 | return pool_name, obj_count, kb_used | ||
875 | 586 | |||
876 | 587 | def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): | ||
877 | 588 | """Validate ceph pool samples taken over time, such as pool | ||
878 | 589 | object counts or pool kb used, before adding, after adding, and | ||
879 | 590 | after deleting items which affect those pool attributes. The | ||
880 | 591 | 2nd element is expected to be greater than the 1st; 3rd is expected | ||
881 | 592 | to be less than the 2nd. | ||
882 | 593 | |||
883 | 594 | :param samples: List containing 3 data samples | ||
884 | 595 | :param sample_type: String for logging and usage context | ||
885 | 596 | :returns: None if successful, Failure message otherwise | ||
886 | 597 | """ | ||
887 | 598 | original, created, deleted = range(3) | ||
888 | 599 | if samples[created] <= samples[original] or \ | ||
889 | 600 | samples[deleted] >= samples[created]: | ||
890 | 601 | return ('Ceph {} samples ({}) ' | ||
891 | 602 | 'unexpected.'.format(sample_type, samples)) | ||
892 | 603 | else: | ||
893 | 604 | self.log.debug('Ceph {} samples (OK): ' | ||
894 | 605 | '{}'.format(sample_type, samples)) | ||
895 | 606 | return None | ||
896 | 607 | |||
897 | 608 | # rabbitmq/amqp specific helpers: | ||
898 | 609 | |||
899 | 610 | def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): | ||
900 | 611 | """Wait for rmq units extended status to show cluster readiness, | ||
901 | 612 | after an optional initial sleep period. Initial sleep is likely | ||
902 | 613 | necessary to be effective following a config change, as status | ||
903 | 614 | message may not instantly update to non-ready.""" | ||
904 | 615 | |||
905 | 616 | if init_sleep: | ||
906 | 617 | time.sleep(init_sleep) | ||
907 | 618 | |||
908 | 619 | message = re.compile('^Unit is ready and clustered$') | ||
909 | 620 | deployment._auto_wait_for_status(message=message, | ||
910 | 621 | timeout=timeout, | ||
911 | 622 | include_only=['rabbitmq-server']) | ||
912 | 623 | |||
913 | 624 | def add_rmq_test_user(self, sentry_units, | ||
914 | 625 | username="testuser1", password="changeme"): | ||
915 | 626 | """Add a test user via the first rmq juju unit, check connection as | ||
916 | 627 | the new user against all sentry units. | ||
917 | 628 | |||
918 | 629 | :param sentry_units: list of sentry unit pointers | ||
919 | 630 | :param username: amqp user name, default to testuser1 | ||
920 | 631 | :param password: amqp user password | ||
921 | 632 | :returns: None if successful. Raise on error. | ||
922 | 633 | """ | ||
923 | 634 | self.log.debug('Adding rmq user ({})...'.format(username)) | ||
924 | 635 | |||
925 | 636 | # Check that user does not already exist | ||
926 | 637 | cmd_user_list = 'rabbitmqctl list_users' | ||
927 | 638 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
928 | 639 | if username in output: | ||
929 | 640 | self.log.warning('User ({}) already exists, returning ' | ||
930 | 641 | 'gracefully.'.format(username)) | ||
931 | 642 | return | ||
932 | 643 | |||
933 | 644 | perms = '".*" ".*" ".*"' | ||
934 | 645 | cmds = ['rabbitmqctl add_user {} {}'.format(username, password), | ||
935 | 646 | 'rabbitmqctl set_permissions {} {}'.format(username, perms)] | ||
936 | 647 | |||
937 | 648 | # Add user via first unit | ||
938 | 649 | for cmd in cmds: | ||
939 | 650 | output, _ = self.run_cmd_unit(sentry_units[0], cmd) | ||
940 | 651 | |||
941 | 652 | # Check connection against the other sentry_units | ||
942 | 653 | self.log.debug('Checking user connect against units...') | ||
943 | 654 | for sentry_unit in sentry_units: | ||
944 | 655 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, | ||
945 | 656 | username=username, | ||
946 | 657 | password=password) | ||
947 | 658 | connection.close() | ||
948 | 659 | |||
949 | 660 | def delete_rmq_test_user(self, sentry_units, username="testuser1"): | ||
950 | 661 | """Delete a rabbitmq user via the first rmq juju unit. | ||
951 | 662 | |||
952 | 663 | :param sentry_units: list of sentry unit pointers | ||
953 | 664 | :param username: amqp user name, default to testuser1 | ||
954 | 665 | :param password: amqp user password | ||
955 | 666 | :returns: None if successful or no such user. | ||
956 | 667 | """ | ||
957 | 668 | self.log.debug('Deleting rmq user ({})...'.format(username)) | ||
958 | 669 | |||
959 | 670 | # Check that the user exists | ||
960 | 671 | cmd_user_list = 'rabbitmqctl list_users' | ||
961 | 672 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
962 | 673 | |||
963 | 674 | if username not in output: | ||
964 | 675 | self.log.warning('User ({}) does not exist, returning ' | ||
965 | 676 | 'gracefully.'.format(username)) | ||
966 | 677 | return | ||
967 | 678 | |||
968 | 679 | # Delete the user | ||
969 | 680 | cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) | ||
970 | 681 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) | ||
971 | 682 | |||
972 | 683 | def get_rmq_cluster_status(self, sentry_unit): | ||
973 | 684 | """Execute rabbitmq cluster status command on a unit and return | ||
974 | 685 | the full output. | ||
975 | 686 | |||
976 | 687 | :param unit: sentry unit | ||
977 | 688 | :returns: String containing console output of cluster status command | ||
978 | 689 | """ | ||
979 | 690 | cmd = 'rabbitmqctl cluster_status' | ||
980 | 691 | output, _ = self.run_cmd_unit(sentry_unit, cmd) | ||
981 | 692 | self.log.debug('{} cluster_status:\n{}'.format( | ||
982 | 693 | sentry_unit.info['unit_name'], output)) | ||
983 | 694 | return str(output) | ||
984 | 695 | |||
985 | 696 | def get_rmq_cluster_running_nodes(self, sentry_unit): | ||
986 | 697 | """Parse rabbitmqctl cluster_status output string, return list of | ||
987 | 698 | running rabbitmq cluster nodes. | ||
988 | 699 | |||
989 | 700 | :param unit: sentry unit | ||
990 | 701 | :returns: List containing node names of running nodes | ||
991 | 702 | """ | ||
992 | 703 | # NOTE(beisner): rabbitmqctl cluster_status output is not | ||
993 | 704 | # json-parsable, do string chop foo, then json.loads that. | ||
994 | 705 | str_stat = self.get_rmq_cluster_status(sentry_unit) | ||
995 | 706 | if 'running_nodes' in str_stat: | ||
996 | 707 | pos_start = str_stat.find("{running_nodes,") + 15 | ||
997 | 708 | pos_end = str_stat.find("]},", pos_start) + 1 | ||
998 | 709 | str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') | ||
999 | 710 | run_nodes = json.loads(str_run_nodes) | ||
1000 | 711 | return run_nodes | ||
1001 | 712 | else: | ||
1002 | 713 | return [] | ||
1003 | 714 | |||
1004 | 715 | def validate_rmq_cluster_running_nodes(self, sentry_units): | ||
1005 | 716 | """Check that all rmq unit hostnames are represented in the | ||
1006 | 717 | cluster_status output of all units. | ||
1007 | 718 | |||
1008 | 719 | :param host_names: dict of juju unit names to host names | ||
1009 | 720 | :param units: list of sentry unit pointers (all rmq units) | ||
1010 | 721 | :returns: None if successful, otherwise return error message | ||
1011 | 722 | """ | ||
1012 | 723 | host_names = self.get_unit_hostnames(sentry_units) | ||
1013 | 724 | errors = [] | ||
1014 | 725 | |||
1015 | 726 | # Query every unit for cluster_status running nodes | ||
1016 | 727 | for query_unit in sentry_units: | ||
1017 | 728 | query_unit_name = query_unit.info['unit_name'] | ||
1018 | 729 | running_nodes = self.get_rmq_cluster_running_nodes(query_unit) | ||
1019 | 730 | |||
1020 | 731 | # Confirm that every unit is represented in the queried unit's | ||
1021 | 732 | # cluster_status running nodes output. | ||
1022 | 733 | for validate_unit in sentry_units: | ||
1023 | 734 | val_host_name = host_names[validate_unit.info['unit_name']] | ||
1024 | 735 | val_node_name = 'rabbit@{}'.format(val_host_name) | ||
1025 | 736 | |||
1026 | 737 | if val_node_name not in running_nodes: | ||
1027 | 738 | errors.append('Cluster member check failed on {}: {} not ' | ||
1028 | 739 | 'in {}\n'.format(query_unit_name, | ||
1029 | 740 | val_node_name, | ||
1030 | 741 | running_nodes)) | ||
1031 | 742 | if errors: | ||
1032 | 743 | return ''.join(errors) | ||
1033 | 744 | |||
1034 | 745 | def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): | ||
1035 | 746 | """Check a single juju rmq unit for ssl and port in the config file.""" | ||
1036 | 747 | host = sentry_unit.info['public-address'] | ||
1037 | 748 | unit_name = sentry_unit.info['unit_name'] | ||
1038 | 749 | |||
1039 | 750 | conf_file = '/etc/rabbitmq/rabbitmq.config' | ||
1040 | 751 | conf_contents = str(self.file_contents_safe(sentry_unit, | ||
1041 | 752 | conf_file, max_wait=16)) | ||
1042 | 753 | # Checks | ||
1043 | 754 | conf_ssl = 'ssl' in conf_contents | ||
1044 | 755 | conf_port = str(port) in conf_contents | ||
1045 | 756 | |||
1046 | 757 | # Port explicitly checked in config | ||
1047 | 758 | if port and conf_port and conf_ssl: | ||
1048 | 759 | self.log.debug('SSL is enabled @{}:{} ' | ||
1049 | 760 | '({})'.format(host, port, unit_name)) | ||
1050 | 761 | return True | ||
1051 | 762 | elif port and not conf_port and conf_ssl: | ||
1052 | 763 | self.log.debug('SSL is enabled @{} but not on port {} ' | ||
1053 | 764 | '({})'.format(host, port, unit_name)) | ||
1054 | 765 | return False | ||
1055 | 766 | # Port not checked (useful when checking that ssl is disabled) | ||
1056 | 767 | elif not port and conf_ssl: | ||
1057 | 768 | self.log.debug('SSL is enabled @{}:{} ' | ||
1058 | 769 | '({})'.format(host, port, unit_name)) | ||
1059 | 770 | return True | ||
1060 | 771 | elif not conf_ssl: | ||
1061 | 772 | self.log.debug('SSL not enabled @{}:{} ' | ||
1062 | 773 | '({})'.format(host, port, unit_name)) | ||
1063 | 774 | return False | ||
1064 | 775 | else: | ||
1065 | 776 | msg = ('Unknown condition when checking SSL status @{}:{} ' | ||
1066 | 777 | '({})'.format(host, port, unit_name)) | ||
1067 | 778 | amulet.raise_status(amulet.FAIL, msg) | ||
1068 | 779 | |||
1069 | 780 | def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): | ||
1070 | 781 | """Check that ssl is enabled on rmq juju sentry units. | ||
1071 | 782 | |||
1072 | 783 | :param sentry_units: list of all rmq sentry units | ||
1073 | 784 | :param port: optional ssl port override to validate | ||
1074 | 785 | :returns: None if successful, otherwise return error message | ||
1075 | 786 | """ | ||
1076 | 787 | for sentry_unit in sentry_units: | ||
1077 | 788 | if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): | ||
1078 | 789 | return ('Unexpected condition: ssl is disabled on unit ' | ||
1079 | 790 | '({})'.format(sentry_unit.info['unit_name'])) | ||
1080 | 791 | return None | ||
1081 | 792 | |||
1082 | 793 | def validate_rmq_ssl_disabled_units(self, sentry_units): | ||
1083 | 794 | """Check that ssl is enabled on listed rmq juju sentry units. | ||
1084 | 795 | |||
1085 | 796 | :param sentry_units: list of all rmq sentry units | ||
1086 | 797 | :returns: True if successful. Raise on error. | ||
1087 | 798 | """ | ||
1088 | 799 | for sentry_unit in sentry_units: | ||
1089 | 800 | if self.rmq_ssl_is_enabled_on_unit(sentry_unit): | ||
1090 | 801 | return ('Unexpected condition: ssl is enabled on unit ' | ||
1091 | 802 | '({})'.format(sentry_unit.info['unit_name'])) | ||
1092 | 803 | return None | ||
1093 | 804 | |||
1094 | 805 | def configure_rmq_ssl_on(self, sentry_units, deployment, | ||
1095 | 806 | port=None, max_wait=60): | ||
1096 | 807 | """Turn ssl charm config option on, with optional non-default | ||
1097 | 808 | ssl port specification. Confirm that it is enabled on every | ||
1098 | 809 | unit. | ||
1099 | 810 | |||
1100 | 811 | :param sentry_units: list of sentry units | ||
1101 | 812 | :param deployment: amulet deployment object pointer | ||
1102 | 813 | :param port: amqp port, use defaults if None | ||
1103 | 814 | :param max_wait: maximum time to wait in seconds to confirm | ||
1104 | 815 | :returns: None if successful. Raise on error. | ||
1105 | 816 | """ | ||
1106 | 817 | self.log.debug('Setting ssl charm config option: on') | ||
1107 | 818 | |||
1108 | 819 | # Enable RMQ SSL | ||
1109 | 820 | config = {'ssl': 'on'} | ||
1110 | 821 | if port: | ||
1111 | 822 | config['ssl_port'] = port | ||
1112 | 823 | |||
1113 | 824 | deployment.d.configure('rabbitmq-server', config) | ||
1114 | 825 | |||
1115 | 826 | # Wait for unit status | ||
1116 | 827 | self.rmq_wait_for_cluster(deployment) | ||
1117 | 828 | |||
1118 | 829 | # Confirm | ||
1119 | 830 | tries = 0 | ||
1120 | 831 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1121 | 832 | while ret and tries < (max_wait / 4): | ||
1122 | 833 | time.sleep(4) | ||
1123 | 834 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1124 | 835 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1125 | 836 | tries += 1 | ||
1126 | 837 | |||
1127 | 838 | if ret: | ||
1128 | 839 | amulet.raise_status(amulet.FAIL, ret) | ||
1129 | 840 | |||
1130 | 841 | def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): | ||
1131 | 842 | """Turn ssl charm config option off, confirm that it is disabled | ||
1132 | 843 | on every unit. | ||
1133 | 844 | |||
1134 | 845 | :param sentry_units: list of sentry units | ||
1135 | 846 | :param deployment: amulet deployment object pointer | ||
1136 | 847 | :param max_wait: maximum time to wait in seconds to confirm | ||
1137 | 848 | :returns: None if successful. Raise on error. | ||
1138 | 849 | """ | ||
1139 | 850 | self.log.debug('Setting ssl charm config option: off') | ||
1140 | 851 | |||
1141 | 852 | # Disable RMQ SSL | ||
1142 | 853 | config = {'ssl': 'off'} | ||
1143 | 854 | deployment.d.configure('rabbitmq-server', config) | ||
1144 | 855 | |||
1145 | 856 | # Wait for unit status | ||
1146 | 857 | self.rmq_wait_for_cluster(deployment) | ||
1147 | 858 | |||
1148 | 859 | # Confirm | ||
1149 | 860 | tries = 0 | ||
1150 | 861 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1151 | 862 | while ret and tries < (max_wait / 4): | ||
1152 | 863 | time.sleep(4) | ||
1153 | 864 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1154 | 865 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1155 | 866 | tries += 1 | ||
1156 | 867 | |||
1157 | 868 | if ret: | ||
1158 | 869 | amulet.raise_status(amulet.FAIL, ret) | ||
1159 | 870 | |||
1160 | 871 | def connect_amqp_by_unit(self, sentry_unit, ssl=False, | ||
1161 | 872 | port=None, fatal=True, | ||
1162 | 873 | username="testuser1", password="changeme"): | ||
1163 | 874 | """Establish and return a pika amqp connection to the rabbitmq service | ||
1164 | 875 | running on a rmq juju unit. | ||
1165 | 876 | |||
1166 | 877 | :param sentry_unit: sentry unit pointer | ||
1167 | 878 | :param ssl: boolean, default to False | ||
1168 | 879 | :param port: amqp port, use defaults if None | ||
1169 | 880 | :param fatal: boolean, default to True (raises on connect error) | ||
1170 | 881 | :param username: amqp user name, default to testuser1 | ||
1171 | 882 | :param password: amqp user password | ||
1172 | 883 | :returns: pika amqp connection pointer or None if failed and non-fatal | ||
1173 | 884 | """ | ||
1174 | 885 | host = sentry_unit.info['public-address'] | ||
1175 | 886 | unit_name = sentry_unit.info['unit_name'] | ||
1176 | 887 | |||
1177 | 888 | # Default port logic if port is not specified | ||
1178 | 889 | if ssl and not port: | ||
1179 | 890 | port = 5671 | ||
1180 | 891 | elif not ssl and not port: | ||
1181 | 892 | port = 5672 | ||
1182 | 893 | |||
1183 | 894 | self.log.debug('Connecting to amqp on {}:{} ({}) as ' | ||
1184 | 895 | '{}...'.format(host, port, unit_name, username)) | ||
1185 | 896 | |||
1186 | 897 | try: | ||
1187 | 898 | credentials = pika.PlainCredentials(username, password) | ||
1188 | 899 | parameters = pika.ConnectionParameters(host=host, port=port, | ||
1189 | 900 | credentials=credentials, | ||
1190 | 901 | ssl=ssl, | ||
1191 | 902 | connection_attempts=3, | ||
1192 | 903 | retry_delay=5, | ||
1193 | 904 | socket_timeout=1) | ||
1194 | 905 | connection = pika.BlockingConnection(parameters) | ||
1195 | 906 | assert connection.server_properties['product'] == 'RabbitMQ' | ||
1196 | 907 | self.log.debug('Connect OK') | ||
1197 | 908 | return connection | ||
1198 | 909 | except Exception as e: | ||
1199 | 910 | msg = ('amqp connection failed to {}:{} as ' | ||
1200 | 911 | '{} ({})'.format(host, port, username, str(e))) | ||
1201 | 912 | if fatal: | ||
1202 | 913 | amulet.raise_status(amulet.FAIL, msg) | ||
1203 | 914 | else: | ||
1204 | 915 | self.log.warn(msg) | ||
1205 | 916 | return None | ||
1206 | 917 | |||
1207 | 918 | def publish_amqp_message_by_unit(self, sentry_unit, message, | ||
1208 | 919 | queue="test", ssl=False, | ||
1209 | 920 | username="testuser1", | ||
1210 | 921 | password="changeme", | ||
1211 | 922 | port=None): | ||
1212 | 923 | """Publish an amqp message to a rmq juju unit. | ||
1213 | 924 | |||
1214 | 925 | :param sentry_unit: sentry unit pointer | ||
1215 | 926 | :param message: amqp message string | ||
1216 | 927 | :param queue: message queue, default to test | ||
1217 | 928 | :param username: amqp user name, default to testuser1 | ||
1218 | 929 | :param password: amqp user password | ||
1219 | 930 | :param ssl: boolean, default to False | ||
1220 | 931 | :param port: amqp port, use defaults if None | ||
1221 | 932 | :returns: None. Raises exception if publish failed. | ||
1222 | 933 | """ | ||
1223 | 934 | self.log.debug('Publishing message to {} queue:\n{}'.format(queue, | ||
1224 | 935 | message)) | ||
1225 | 936 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1226 | 937 | port=port, | ||
1227 | 938 | username=username, | ||
1228 | 939 | password=password) | ||
1229 | 940 | |||
1230 | 941 | # NOTE(beisner): extra debug here re: pika hang potential: | ||
1231 | 942 | # https://github.com/pika/pika/issues/297 | ||
1232 | 943 | # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw | ||
1233 | 944 | self.log.debug('Defining channel...') | ||
1234 | 945 | channel = connection.channel() | ||
1235 | 946 | self.log.debug('Declaring queue...') | ||
1236 | 947 | channel.queue_declare(queue=queue, auto_delete=False, durable=True) | ||
1237 | 948 | self.log.debug('Publishing message...') | ||
1238 | 949 | channel.basic_publish(exchange='', routing_key=queue, body=message) | ||
1239 | 950 | self.log.debug('Closing channel...') | ||
1240 | 951 | channel.close() | ||
1241 | 952 | self.log.debug('Closing connection...') | ||
1242 | 953 | connection.close() | ||
1243 | 954 | |||
1244 | 955 | def get_amqp_message_by_unit(self, sentry_unit, queue="test", | ||
1245 | 956 | username="testuser1", | ||
1246 | 957 | password="changeme", | ||
1247 | 958 | ssl=False, port=None): | ||
1248 | 959 | """Get an amqp message from a rmq juju unit. | ||
1249 | 960 | |||
1250 | 961 | :param sentry_unit: sentry unit pointer | ||
1251 | 962 | :param queue: message queue, default to test | ||
1252 | 963 | :param username: amqp user name, default to testuser1 | ||
1253 | 964 | :param password: amqp user password | ||
1254 | 965 | :param ssl: boolean, default to False | ||
1255 | 966 | :param port: amqp port, use defaults if None | ||
1256 | 967 | :returns: amqp message body as string. Raise if get fails. | ||
1257 | 968 | """ | ||
1258 | 969 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1259 | 970 | port=port, | ||
1260 | 971 | username=username, | ||
1261 | 972 | password=password) | ||
1262 | 973 | channel = connection.channel() | ||
1263 | 974 | method_frame, _, body = channel.basic_get(queue) | ||
1264 | 975 | |||
1265 | 976 | if method_frame: | ||
1266 | 977 | self.log.debug('Retreived message from {} queue:\n{}'.format(queue, | ||
1267 | 978 | body)) | ||
1268 | 979 | channel.basic_ack(method_frame.delivery_tag) | ||
1269 | 980 | channel.close() | ||
1270 | 981 | connection.close() | ||
1271 | 982 | return body | ||
1272 | 983 | else: | ||
1273 | 984 | msg = 'No message retrieved.' | ||
1274 | 985 | amulet.raise_status(amulet.FAIL, msg) | ||
1275 | 295 | 986 | ||
1276 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
1277 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-06-10 07:35:12 +0000 | |||
1278 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-12-01 15:05:49 +0000 | |||
1279 | @@ -14,6 +14,7 @@ | |||
1280 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
1281 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1282 | 16 | 16 | ||
1283 | 17 | import glob | ||
1284 | 17 | import json | 18 | import json |
1285 | 18 | import os | 19 | import os |
1286 | 19 | import re | 20 | import re |
1287 | @@ -50,6 +51,8 @@ | |||
1288 | 50 | from charmhelpers.core.strutils import bool_from_string | 51 | from charmhelpers.core.strutils import bool_from_string |
1289 | 51 | 52 | ||
1290 | 52 | from charmhelpers.core.host import ( | 53 | from charmhelpers.core.host import ( |
1291 | 54 | get_bond_master, | ||
1292 | 55 | is_phy_iface, | ||
1293 | 53 | list_nics, | 56 | list_nics, |
1294 | 54 | get_nic_hwaddr, | 57 | get_nic_hwaddr, |
1295 | 55 | mkdir, | 58 | mkdir, |
1296 | @@ -122,21 +125,24 @@ | |||
1297 | 122 | of specifying multiple key value pairs within the same string. For | 125 | of specifying multiple key value pairs within the same string. For |
1298 | 123 | example, a string in the format of 'key1=value1, key2=value2' will | 126 | example, a string in the format of 'key1=value1, key2=value2' will |
1299 | 124 | return a dict of: | 127 | return a dict of: |
1302 | 125 | {'key1': 'value1', | 128 | |
1303 | 126 | 'key2': 'value2'}. | 129 | {'key1': 'value1', |
1304 | 130 | 'key2': 'value2'}. | ||
1305 | 127 | 131 | ||
1306 | 128 | 2. A string in the above format, but supporting a comma-delimited list | 132 | 2. A string in the above format, but supporting a comma-delimited list |
1307 | 129 | of values for the same key. For example, a string in the format of | 133 | of values for the same key. For example, a string in the format of |
1308 | 130 | 'key1=value1, key2=value3,value4,value5' will return a dict of: | 134 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
1311 | 131 | {'key1', 'value1', | 135 | |
1312 | 132 | 'key2', 'value2,value3,value4'} | 136 | {'key1', 'value1', |
1313 | 137 | 'key2', 'value2,value3,value4'} | ||
1314 | 133 | 138 | ||
1315 | 134 | 3. A string containing a colon character (:) prior to an equal | 139 | 3. A string containing a colon character (:) prior to an equal |
1316 | 135 | character (=) will be treated as yaml and parsed as such. This can be | 140 | character (=) will be treated as yaml and parsed as such. This can be |
1317 | 136 | used to specify more complex key value pairs. For example, | 141 | used to specify more complex key value pairs. For example, |
1318 | 137 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will | 142 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
1319 | 138 | return a dict of: | 143 | return a dict of: |
1321 | 139 | {'key1', 'subkey1=value1, subkey2=value2'} | 144 | |
1322 | 145 | {'key1', 'subkey1=value1, subkey2=value2'} | ||
1323 | 140 | 146 | ||
1324 | 141 | The provided config_flags string may be a list of comma-separated values | 147 | The provided config_flags string may be a list of comma-separated values |
1325 | 142 | which themselves may be comma-separated list of values. | 148 | which themselves may be comma-separated list of values. |
1326 | @@ -189,10 +195,50 @@ | |||
1327 | 189 | class OSContextGenerator(object): | 195 | class OSContextGenerator(object): |
1328 | 190 | """Base class for all context generators.""" | 196 | """Base class for all context generators.""" |
1329 | 191 | interfaces = [] | 197 | interfaces = [] |
1330 | 198 | related = False | ||
1331 | 199 | complete = False | ||
1332 | 200 | missing_data = [] | ||
1333 | 192 | 201 | ||
1334 | 193 | def __call__(self): | 202 | def __call__(self): |
1335 | 194 | raise NotImplementedError | 203 | raise NotImplementedError |
1336 | 195 | 204 | ||
1337 | 205 | def context_complete(self, ctxt): | ||
1338 | 206 | """Check for missing data for the required context data. | ||
1339 | 207 | Set self.missing_data if it exists and return False. | ||
1340 | 208 | Set self.complete if no missing data and return True. | ||
1341 | 209 | """ | ||
1342 | 210 | # Fresh start | ||
1343 | 211 | self.complete = False | ||
1344 | 212 | self.missing_data = [] | ||
1345 | 213 | for k, v in six.iteritems(ctxt): | ||
1346 | 214 | if v is None or v == '': | ||
1347 | 215 | if k not in self.missing_data: | ||
1348 | 216 | self.missing_data.append(k) | ||
1349 | 217 | |||
1350 | 218 | if self.missing_data: | ||
1351 | 219 | self.complete = False | ||
1352 | 220 | log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) | ||
1353 | 221 | else: | ||
1354 | 222 | self.complete = True | ||
1355 | 223 | return self.complete | ||
1356 | 224 | |||
1357 | 225 | def get_related(self): | ||
1358 | 226 | """Check if any of the context interfaces have relation ids. | ||
1359 | 227 | Set self.related and return True if one of the interfaces | ||
1360 | 228 | has relation ids. | ||
1361 | 229 | """ | ||
1362 | 230 | # Fresh start | ||
1363 | 231 | self.related = False | ||
1364 | 232 | try: | ||
1365 | 233 | for interface in self.interfaces: | ||
1366 | 234 | if relation_ids(interface): | ||
1367 | 235 | self.related = True | ||
1368 | 236 | return self.related | ||
1369 | 237 | except AttributeError as e: | ||
1370 | 238 | log("{} {}" | ||
1371 | 239 | "".format(self, e), 'INFO') | ||
1372 | 240 | return self.related | ||
1373 | 241 | |||
1374 | 196 | 242 | ||
1375 | 197 | class SharedDBContext(OSContextGenerator): | 243 | class SharedDBContext(OSContextGenerator): |
1376 | 198 | interfaces = ['shared-db'] | 244 | interfaces = ['shared-db'] |
1377 | @@ -208,6 +254,7 @@ | |||
1378 | 208 | self.database = database | 254 | self.database = database |
1379 | 209 | self.user = user | 255 | self.user = user |
1380 | 210 | self.ssl_dir = ssl_dir | 256 | self.ssl_dir = ssl_dir |
1381 | 257 | self.rel_name = self.interfaces[0] | ||
1382 | 211 | 258 | ||
1383 | 212 | def __call__(self): | 259 | def __call__(self): |
1384 | 213 | self.database = self.database or config('database') | 260 | self.database = self.database or config('database') |
1385 | @@ -240,7 +287,8 @@ | |||
1386 | 240 | if self.relation_prefix: | 287 | if self.relation_prefix: |
1387 | 241 | password_setting = self.relation_prefix + '_password' | 288 | password_setting = self.relation_prefix + '_password' |
1388 | 242 | 289 | ||
1390 | 243 | for rid in relation_ids('shared-db'): | 290 | for rid in relation_ids(self.interfaces[0]): |
1391 | 291 | self.related = True | ||
1392 | 244 | for unit in related_units(rid): | 292 | for unit in related_units(rid): |
1393 | 245 | rdata = relation_get(rid=rid, unit=unit) | 293 | rdata = relation_get(rid=rid, unit=unit) |
1394 | 246 | host = rdata.get('db_host') | 294 | host = rdata.get('db_host') |
1395 | @@ -252,7 +300,7 @@ | |||
1396 | 252 | 'database_password': rdata.get(password_setting), | 300 | 'database_password': rdata.get(password_setting), |
1397 | 253 | 'database_type': 'mysql' | 301 | 'database_type': 'mysql' |
1398 | 254 | } | 302 | } |
1400 | 255 | if context_complete(ctxt): | 303 | if self.context_complete(ctxt): |
1401 | 256 | db_ssl(rdata, ctxt, self.ssl_dir) | 304 | db_ssl(rdata, ctxt, self.ssl_dir) |
1402 | 257 | return ctxt | 305 | return ctxt |
1403 | 258 | return {} | 306 | return {} |
1404 | @@ -273,6 +321,7 @@ | |||
1405 | 273 | 321 | ||
1406 | 274 | ctxt = {} | 322 | ctxt = {} |
1407 | 275 | for rid in relation_ids(self.interfaces[0]): | 323 | for rid in relation_ids(self.interfaces[0]): |
1408 | 324 | self.related = True | ||
1409 | 276 | for unit in related_units(rid): | 325 | for unit in related_units(rid): |
1410 | 277 | rel_host = relation_get('host', rid=rid, unit=unit) | 326 | rel_host = relation_get('host', rid=rid, unit=unit) |
1411 | 278 | rel_user = relation_get('user', rid=rid, unit=unit) | 327 | rel_user = relation_get('user', rid=rid, unit=unit) |
1412 | @@ -282,7 +331,7 @@ | |||
1413 | 282 | 'database_user': rel_user, | 331 | 'database_user': rel_user, |
1414 | 283 | 'database_password': rel_passwd, | 332 | 'database_password': rel_passwd, |
1415 | 284 | 'database_type': 'postgresql'} | 333 | 'database_type': 'postgresql'} |
1417 | 285 | if context_complete(ctxt): | 334 | if self.context_complete(ctxt): |
1418 | 286 | return ctxt | 335 | return ctxt |
1419 | 287 | 336 | ||
1420 | 288 | return {} | 337 | return {} |
1421 | @@ -343,6 +392,7 @@ | |||
1422 | 343 | ctxt['signing_dir'] = cachedir | 392 | ctxt['signing_dir'] = cachedir |
1423 | 344 | 393 | ||
1424 | 345 | for rid in relation_ids(self.rel_name): | 394 | for rid in relation_ids(self.rel_name): |
1425 | 395 | self.related = True | ||
1426 | 346 | for unit in related_units(rid): | 396 | for unit in related_units(rid): |
1427 | 347 | rdata = relation_get(rid=rid, unit=unit) | 397 | rdata = relation_get(rid=rid, unit=unit) |
1428 | 348 | serv_host = rdata.get('service_host') | 398 | serv_host = rdata.get('service_host') |
1429 | @@ -361,7 +411,7 @@ | |||
1430 | 361 | 'service_protocol': svc_protocol, | 411 | 'service_protocol': svc_protocol, |
1431 | 362 | 'auth_protocol': auth_protocol}) | 412 | 'auth_protocol': auth_protocol}) |
1432 | 363 | 413 | ||
1434 | 364 | if context_complete(ctxt): | 414 | if self.context_complete(ctxt): |
1435 | 365 | # NOTE(jamespage) this is required for >= icehouse | 415 | # NOTE(jamespage) this is required for >= icehouse |
1436 | 366 | # so a missing value just indicates keystone needs | 416 | # so a missing value just indicates keystone needs |
1437 | 367 | # upgrading | 417 | # upgrading |
1438 | @@ -400,6 +450,7 @@ | |||
1439 | 400 | ctxt = {} | 450 | ctxt = {} |
1440 | 401 | for rid in relation_ids(self.rel_name): | 451 | for rid in relation_ids(self.rel_name): |
1441 | 402 | ha_vip_only = False | 452 | ha_vip_only = False |
1442 | 453 | self.related = True | ||
1443 | 403 | for unit in related_units(rid): | 454 | for unit in related_units(rid): |
1444 | 404 | if relation_get('clustered', rid=rid, unit=unit): | 455 | if relation_get('clustered', rid=rid, unit=unit): |
1445 | 405 | ctxt['clustered'] = True | 456 | ctxt['clustered'] = True |
1446 | @@ -432,7 +483,7 @@ | |||
1447 | 432 | ha_vip_only = relation_get('ha-vip-only', | 483 | ha_vip_only = relation_get('ha-vip-only', |
1448 | 433 | rid=rid, unit=unit) is not None | 484 | rid=rid, unit=unit) is not None |
1449 | 434 | 485 | ||
1451 | 435 | if context_complete(ctxt): | 486 | if self.context_complete(ctxt): |
1452 | 436 | if 'rabbit_ssl_ca' in ctxt: | 487 | if 'rabbit_ssl_ca' in ctxt: |
1453 | 437 | if not self.ssl_dir: | 488 | if not self.ssl_dir: |
1454 | 438 | log("Charm not setup for ssl support but ssl ca " | 489 | log("Charm not setup for ssl support but ssl ca " |
1455 | @@ -464,7 +515,7 @@ | |||
1456 | 464 | ctxt['oslo_messaging_flags'] = config_flags_parser( | 515 | ctxt['oslo_messaging_flags'] = config_flags_parser( |
1457 | 465 | oslo_messaging_flags) | 516 | oslo_messaging_flags) |
1458 | 466 | 517 | ||
1460 | 467 | if not context_complete(ctxt): | 518 | if not self.complete: |
1461 | 468 | return {} | 519 | return {} |
1462 | 469 | 520 | ||
1463 | 470 | return ctxt | 521 | return ctxt |
1464 | @@ -480,13 +531,15 @@ | |||
1465 | 480 | 531 | ||
1466 | 481 | log('Generating template context for ceph', level=DEBUG) | 532 | log('Generating template context for ceph', level=DEBUG) |
1467 | 482 | mon_hosts = [] | 533 | mon_hosts = [] |
1471 | 483 | auth = None | 534 | ctxt = { |
1472 | 484 | key = None | 535 | 'use_syslog': str(config('use-syslog')).lower() |
1473 | 485 | use_syslog = str(config('use-syslog')).lower() | 536 | } |
1474 | 486 | for rid in relation_ids('ceph'): | 537 | for rid in relation_ids('ceph'): |
1475 | 487 | for unit in related_units(rid): | 538 | for unit in related_units(rid): |
1478 | 488 | auth = relation_get('auth', rid=rid, unit=unit) | 539 | if not ctxt.get('auth'): |
1479 | 489 | key = relation_get('key', rid=rid, unit=unit) | 540 | ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) |
1480 | 541 | if not ctxt.get('key'): | ||
1481 | 542 | ctxt['key'] = relation_get('key', rid=rid, unit=unit) | ||
1482 | 490 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, | 543 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, |
1483 | 491 | unit=unit) | 544 | unit=unit) |
1484 | 492 | unit_priv_addr = relation_get('private-address', rid=rid, | 545 | unit_priv_addr = relation_get('private-address', rid=rid, |
1485 | @@ -495,15 +548,12 @@ | |||
1486 | 495 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | 548 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
1487 | 496 | mon_hosts.append(ceph_addr) | 549 | mon_hosts.append(ceph_addr) |
1488 | 497 | 550 | ||
1493 | 498 | ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), | 551 | ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) |
1490 | 499 | 'auth': auth, | ||
1491 | 500 | 'key': key, | ||
1492 | 501 | 'use_syslog': use_syslog} | ||
1494 | 502 | 552 | ||
1495 | 503 | if not os.path.isdir('/etc/ceph'): | 553 | if not os.path.isdir('/etc/ceph'): |
1496 | 504 | os.mkdir('/etc/ceph') | 554 | os.mkdir('/etc/ceph') |
1497 | 505 | 555 | ||
1499 | 506 | if not context_complete(ctxt): | 556 | if not self.context_complete(ctxt): |
1500 | 507 | return {} | 557 | return {} |
1501 | 508 | 558 | ||
1502 | 509 | ensure_packages(['ceph-common']) | 559 | ensure_packages(['ceph-common']) |
1503 | @@ -890,9 +940,32 @@ | |||
1504 | 890 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} | 940 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} |
1505 | 891 | return ctxt | 941 | return ctxt |
1506 | 892 | 942 | ||
1507 | 943 | def pg_ctxt(self): | ||
1508 | 944 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1509 | 945 | self.network_manager) | ||
1510 | 946 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1511 | 947 | self.network_manager) | ||
1512 | 948 | ovs_ctxt = {'core_plugin': driver, | ||
1513 | 949 | 'neutron_plugin': 'plumgrid', | ||
1514 | 950 | 'neutron_security_groups': self.neutron_security_groups, | ||
1515 | 951 | 'local_ip': unit_private_ip(), | ||
1516 | 952 | 'config': config} | ||
1517 | 953 | return ovs_ctxt | ||
1518 | 954 | |||
1519 | 955 | def midonet_ctxt(self): | ||
1520 | 956 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1521 | 957 | self.network_manager) | ||
1522 | 958 | midonet_config = neutron_plugin_attribute(self.plugin, 'config', | ||
1523 | 959 | self.network_manager) | ||
1524 | 960 | mido_ctxt = {'core_plugin': driver, | ||
1525 | 961 | 'neutron_plugin': 'midonet', | ||
1526 | 962 | 'neutron_security_groups': self.neutron_security_groups, | ||
1527 | 963 | 'local_ip': unit_private_ip(), | ||
1528 | 964 | 'config': midonet_config} | ||
1529 | 965 | |||
1530 | 966 | return mido_ctxt | ||
1531 | 967 | |||
1532 | 893 | def __call__(self): | 968 | def __call__(self): |
1533 | 894 | self._ensure_packages() | ||
1534 | 895 | |||
1535 | 896 | if self.network_manager not in ['quantum', 'neutron']: | 969 | if self.network_manager not in ['quantum', 'neutron']: |
1536 | 897 | return {} | 970 | return {} |
1537 | 898 | 971 | ||
1538 | @@ -911,6 +984,10 @@ | |||
1539 | 911 | ctxt.update(self.calico_ctxt()) | 984 | ctxt.update(self.calico_ctxt()) |
1540 | 912 | elif self.plugin == 'vsp': | 985 | elif self.plugin == 'vsp': |
1541 | 913 | ctxt.update(self.nuage_ctxt()) | 986 | ctxt.update(self.nuage_ctxt()) |
1542 | 987 | elif self.plugin == 'plumgrid': | ||
1543 | 988 | ctxt.update(self.pg_ctxt()) | ||
1544 | 989 | elif self.plugin == 'midonet': | ||
1545 | 990 | ctxt.update(self.midonet_ctxt()) | ||
1546 | 914 | 991 | ||
1547 | 915 | alchemy_flags = config('neutron-alchemy-flags') | 992 | alchemy_flags = config('neutron-alchemy-flags') |
1548 | 916 | if alchemy_flags: | 993 | if alchemy_flags: |
1549 | @@ -922,7 +999,6 @@ | |||
1550 | 922 | 999 | ||
1551 | 923 | 1000 | ||
1552 | 924 | class NeutronPortContext(OSContextGenerator): | 1001 | class NeutronPortContext(OSContextGenerator): |
1553 | 925 | NIC_PREFIXES = ['eth', 'bond'] | ||
1554 | 926 | 1002 | ||
1555 | 927 | def resolve_ports(self, ports): | 1003 | def resolve_ports(self, ports): |
1556 | 928 | """Resolve NICs not yet bound to bridge(s) | 1004 | """Resolve NICs not yet bound to bridge(s) |
1557 | @@ -934,7 +1010,18 @@ | |||
1558 | 934 | 1010 | ||
1559 | 935 | hwaddr_to_nic = {} | 1011 | hwaddr_to_nic = {} |
1560 | 936 | hwaddr_to_ip = {} | 1012 | hwaddr_to_ip = {} |
1562 | 937 | for nic in list_nics(self.NIC_PREFIXES): | 1013 | for nic in list_nics(): |
1563 | 1014 | # Ignore virtual interfaces (bond masters will be identified from | ||
1564 | 1015 | # their slaves) | ||
1565 | 1016 | if not is_phy_iface(nic): | ||
1566 | 1017 | continue | ||
1567 | 1018 | |||
1568 | 1019 | _nic = get_bond_master(nic) | ||
1569 | 1020 | if _nic: | ||
1570 | 1021 | log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), | ||
1571 | 1022 | level=DEBUG) | ||
1572 | 1023 | nic = _nic | ||
1573 | 1024 | |||
1574 | 938 | hwaddr = get_nic_hwaddr(nic) | 1025 | hwaddr = get_nic_hwaddr(nic) |
1575 | 939 | hwaddr_to_nic[hwaddr] = nic | 1026 | hwaddr_to_nic[hwaddr] = nic |
1576 | 940 | addresses = get_ipv4_addr(nic, fatal=False) | 1027 | addresses = get_ipv4_addr(nic, fatal=False) |
1577 | @@ -960,7 +1047,8 @@ | |||
1578 | 960 | # trust it to be the real external network). | 1047 | # trust it to be the real external network). |
1579 | 961 | resolved.append(entry) | 1048 | resolved.append(entry) |
1580 | 962 | 1049 | ||
1582 | 963 | return resolved | 1050 | # Ensure no duplicates |
1583 | 1051 | return list(set(resolved)) | ||
1584 | 964 | 1052 | ||
1585 | 965 | 1053 | ||
1586 | 966 | class OSConfigFlagContext(OSContextGenerator): | 1054 | class OSConfigFlagContext(OSContextGenerator): |
1587 | @@ -1000,6 +1088,20 @@ | |||
1588 | 1000 | config_flags_parser(config_flags)} | 1088 | config_flags_parser(config_flags)} |
1589 | 1001 | 1089 | ||
1590 | 1002 | 1090 | ||
1591 | 1091 | class LibvirtConfigFlagsContext(OSContextGenerator): | ||
1592 | 1092 | """ | ||
1593 | 1093 | This context provides support for extending | ||
1594 | 1094 | the libvirt section through user-defined flags. | ||
1595 | 1095 | """ | ||
1596 | 1096 | def __call__(self): | ||
1597 | 1097 | ctxt = {} | ||
1598 | 1098 | libvirt_flags = config('libvirt-flags') | ||
1599 | 1099 | if libvirt_flags: | ||
1600 | 1100 | ctxt['libvirt_flags'] = config_flags_parser( | ||
1601 | 1101 | libvirt_flags) | ||
1602 | 1102 | return ctxt | ||
1603 | 1103 | |||
1604 | 1104 | |||
1605 | 1003 | class SubordinateConfigContext(OSContextGenerator): | 1105 | class SubordinateConfigContext(OSContextGenerator): |
1606 | 1004 | 1106 | ||
1607 | 1005 | """ | 1107 | """ |
1608 | @@ -1032,7 +1134,7 @@ | |||
1609 | 1032 | 1134 | ||
1610 | 1033 | ctxt = { | 1135 | ctxt = { |
1611 | 1034 | ... other context ... | 1136 | ... other context ... |
1613 | 1035 | 'subordinate_config': { | 1137 | 'subordinate_configuration': { |
1614 | 1036 | 'DEFAULT': { | 1138 | 'DEFAULT': { |
1615 | 1037 | 'key1': 'value1', | 1139 | 'key1': 'value1', |
1616 | 1038 | }, | 1140 | }, |
1617 | @@ -1050,13 +1152,22 @@ | |||
1618 | 1050 | :param config_file : Service's config file to query sections | 1152 | :param config_file : Service's config file to query sections |
1619 | 1051 | :param interface : Subordinate interface to inspect | 1153 | :param interface : Subordinate interface to inspect |
1620 | 1052 | """ | 1154 | """ |
1621 | 1053 | self.service = service | ||
1622 | 1054 | self.config_file = config_file | 1155 | self.config_file = config_file |
1624 | 1055 | self.interface = interface | 1156 | if isinstance(service, list): |
1625 | 1157 | self.services = service | ||
1626 | 1158 | else: | ||
1627 | 1159 | self.services = [service] | ||
1628 | 1160 | if isinstance(interface, list): | ||
1629 | 1161 | self.interfaces = interface | ||
1630 | 1162 | else: | ||
1631 | 1163 | self.interfaces = [interface] | ||
1632 | 1056 | 1164 | ||
1633 | 1057 | def __call__(self): | 1165 | def __call__(self): |
1634 | 1058 | ctxt = {'sections': {}} | 1166 | ctxt = {'sections': {}} |
1636 | 1059 | for rid in relation_ids(self.interface): | 1167 | rids = [] |
1637 | 1168 | for interface in self.interfaces: | ||
1638 | 1169 | rids.extend(relation_ids(interface)) | ||
1639 | 1170 | for rid in rids: | ||
1640 | 1060 | for unit in related_units(rid): | 1171 | for unit in related_units(rid): |
1641 | 1061 | sub_config = relation_get('subordinate_configuration', | 1172 | sub_config = relation_get('subordinate_configuration', |
1642 | 1062 | rid=rid, unit=unit) | 1173 | rid=rid, unit=unit) |
1643 | @@ -1064,33 +1175,37 @@ | |||
1644 | 1064 | try: | 1175 | try: |
1645 | 1065 | sub_config = json.loads(sub_config) | 1176 | sub_config = json.loads(sub_config) |
1646 | 1066 | except: | 1177 | except: |
1674 | 1067 | log('Could not parse JSON from subordinate_config ' | 1178 | log('Could not parse JSON from ' |
1675 | 1068 | 'setting from %s' % rid, level=ERROR) | 1179 | 'subordinate_configuration setting from %s' |
1676 | 1069 | continue | 1180 | % rid, level=ERROR) |
1677 | 1070 | 1181 | continue | |
1678 | 1071 | if self.service not in sub_config: | 1182 | |
1679 | 1072 | log('Found subordinate_config on %s but it contained' | 1183 | for service in self.services: |
1680 | 1073 | 'nothing for %s service' % (rid, self.service), | 1184 | if service not in sub_config: |
1681 | 1074 | level=INFO) | 1185 | log('Found subordinate_configuration on %s but it ' |
1682 | 1075 | continue | 1186 | 'contained nothing for %s service' |
1683 | 1076 | 1187 | % (rid, service), level=INFO) | |
1684 | 1077 | sub_config = sub_config[self.service] | 1188 | continue |
1685 | 1078 | if self.config_file not in sub_config: | 1189 | |
1686 | 1079 | log('Found subordinate_config on %s but it contained' | 1190 | sub_config = sub_config[service] |
1687 | 1080 | 'nothing for %s' % (rid, self.config_file), | 1191 | if self.config_file not in sub_config: |
1688 | 1081 | level=INFO) | 1192 | log('Found subordinate_configuration on %s but it ' |
1689 | 1082 | continue | 1193 | 'contained nothing for %s' |
1690 | 1083 | 1194 | % (rid, self.config_file), level=INFO) | |
1691 | 1084 | sub_config = sub_config[self.config_file] | 1195 | continue |
1692 | 1085 | for k, v in six.iteritems(sub_config): | 1196 | |
1693 | 1086 | if k == 'sections': | 1197 | sub_config = sub_config[self.config_file] |
1694 | 1087 | for section, config_dict in six.iteritems(v): | 1198 | for k, v in six.iteritems(sub_config): |
1695 | 1088 | log("adding section '%s'" % (section), | 1199 | if k == 'sections': |
1696 | 1089 | level=DEBUG) | 1200 | for section, config_list in six.iteritems(v): |
1697 | 1090 | ctxt[k][section] = config_dict | 1201 | log("adding section '%s'" % (section), |
1698 | 1091 | else: | 1202 | level=DEBUG) |
1699 | 1092 | ctxt[k] = v | 1203 | if ctxt[k].get(section): |
1700 | 1093 | 1204 | ctxt[k][section].extend(config_list) | |
1701 | 1205 | else: | ||
1702 | 1206 | ctxt[k][section] = config_list | ||
1703 | 1207 | else: | ||
1704 | 1208 | ctxt[k] = v | ||
1705 | 1094 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | 1209 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
1706 | 1095 | return ctxt | 1210 | return ctxt |
1707 | 1096 | 1211 | ||
1708 | @@ -1267,15 +1382,19 @@ | |||
1709 | 1267 | def __call__(self): | 1382 | def __call__(self): |
1710 | 1268 | ports = config('data-port') | 1383 | ports = config('data-port') |
1711 | 1269 | if ports: | 1384 | if ports: |
1712 | 1385 | # Map of {port/mac:bridge} | ||
1713 | 1270 | portmap = parse_data_port_mappings(ports) | 1386 | portmap = parse_data_port_mappings(ports) |
1715 | 1271 | ports = portmap.values() | 1387 | ports = portmap.keys() |
1716 | 1388 | # Resolve provided ports or mac addresses and filter out those | ||
1717 | 1389 | # already attached to a bridge. | ||
1718 | 1272 | resolved = self.resolve_ports(ports) | 1390 | resolved = self.resolve_ports(ports) |
1719 | 1391 | # FIXME: is this necessary? | ||
1720 | 1273 | normalized = {get_nic_hwaddr(port): port for port in resolved | 1392 | normalized = {get_nic_hwaddr(port): port for port in resolved |
1721 | 1274 | if port not in ports} | 1393 | if port not in ports} |
1722 | 1275 | normalized.update({port: port for port in resolved | 1394 | normalized.update({port: port for port in resolved |
1723 | 1276 | if port in ports}) | 1395 | if port in ports}) |
1724 | 1277 | if resolved: | 1396 | if resolved: |
1726 | 1278 | return {bridge: normalized[port] for bridge, port in | 1397 | return {normalized[port]: bridge for port, bridge in |
1727 | 1279 | six.iteritems(portmap) if port in normalized.keys()} | 1398 | six.iteritems(portmap) if port in normalized.keys()} |
1728 | 1280 | 1399 | ||
1729 | 1281 | return None | 1400 | return None |
1730 | @@ -1286,12 +1405,22 @@ | |||
1731 | 1286 | def __call__(self): | 1405 | def __call__(self): |
1732 | 1287 | ctxt = {} | 1406 | ctxt = {} |
1733 | 1288 | mappings = super(PhyNICMTUContext, self).__call__() | 1407 | mappings = super(PhyNICMTUContext, self).__call__() |
1736 | 1289 | if mappings and mappings.values(): | 1408 | if mappings and mappings.keys(): |
1737 | 1290 | ports = mappings.values() | 1409 | ports = sorted(mappings.keys()) |
1738 | 1291 | napi_settings = NeutronAPIContext()() | 1410 | napi_settings = NeutronAPIContext()() |
1739 | 1292 | mtu = napi_settings.get('network_device_mtu') | 1411 | mtu = napi_settings.get('network_device_mtu') |
1740 | 1412 | all_ports = set() | ||
1741 | 1413 | # If any of ports is a vlan device, its underlying device must have | ||
1742 | 1414 | # mtu applied first. | ||
1743 | 1415 | for port in ports: | ||
1744 | 1416 | for lport in glob.glob("/sys/class/net/%s/lower_*" % port): | ||
1745 | 1417 | lport = os.path.basename(lport) | ||
1746 | 1418 | all_ports.add(lport.split('_')[1]) | ||
1747 | 1419 | |||
1748 | 1420 | all_ports = list(all_ports) | ||
1749 | 1421 | all_ports.extend(ports) | ||
1750 | 1293 | if mtu: | 1422 | if mtu: |
1752 | 1294 | ctxt["devs"] = '\\n'.join(ports) | 1423 | ctxt["devs"] = '\\n'.join(all_ports) |
1753 | 1295 | ctxt['mtu'] = mtu | 1424 | ctxt['mtu'] = mtu |
1754 | 1296 | 1425 | ||
1755 | 1297 | return ctxt | 1426 | return ctxt |
1756 | @@ -1323,6 +1452,6 @@ | |||
1757 | 1323 | 'auth_protocol': | 1452 | 'auth_protocol': |
1758 | 1324 | rdata.get('auth_protocol') or 'http', | 1453 | rdata.get('auth_protocol') or 'http', |
1759 | 1325 | } | 1454 | } |
1761 | 1326 | if context_complete(ctxt): | 1455 | if self.context_complete(ctxt): |
1762 | 1327 | return ctxt | 1456 | return ctxt |
1763 | 1328 | return {} | 1457 | return {} |
1764 | 1329 | 1458 | ||
1765 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
1766 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-10 15:45:48 +0000 | |||
1767 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-12-01 15:05:49 +0000 | |||
1768 | @@ -195,6 +195,34 @@ | |||
1769 | 195 | 'packages': [], | 195 | 'packages': [], |
1770 | 196 | 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], | 196 | 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], |
1771 | 197 | 'server_services': ['neutron-server'] | 197 | 'server_services': ['neutron-server'] |
1772 | 198 | }, | ||
1773 | 199 | 'plumgrid': { | ||
1774 | 200 | 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', | ||
1775 | 201 | 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', | ||
1776 | 202 | 'contexts': [ | ||
1777 | 203 | context.SharedDBContext(user=config('database-user'), | ||
1778 | 204 | database=config('database'), | ||
1779 | 205 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1780 | 206 | 'services': [], | ||
1781 | 207 | 'packages': ['plumgrid-lxc', | ||
1782 | 208 | 'iovisor-dkms'], | ||
1783 | 209 | 'server_packages': ['neutron-server', | ||
1784 | 210 | 'neutron-plugin-plumgrid'], | ||
1785 | 211 | 'server_services': ['neutron-server'] | ||
1786 | 212 | }, | ||
1787 | 213 | 'midonet': { | ||
1788 | 214 | 'config': '/etc/neutron/plugins/midonet/midonet.ini', | ||
1789 | 215 | 'driver': 'midonet.neutron.plugin.MidonetPluginV2', | ||
1790 | 216 | 'contexts': [ | ||
1791 | 217 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1792 | 218 | database=config('neutron-database'), | ||
1793 | 219 | relation_prefix='neutron', | ||
1794 | 220 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1795 | 221 | 'services': [], | ||
1796 | 222 | 'packages': [[headers_package()] + determine_dkms_package()], | ||
1797 | 223 | 'server_packages': ['neutron-server', | ||
1798 | 224 | 'python-neutron-plugin-midonet'], | ||
1799 | 225 | 'server_services': ['neutron-server'] | ||
1800 | 198 | } | 226 | } |
1801 | 199 | } | 227 | } |
1802 | 200 | if release >= 'icehouse': | 228 | if release >= 'icehouse': |
1803 | @@ -255,17 +283,30 @@ | |||
1804 | 255 | return 'neutron' | 283 | return 'neutron' |
1805 | 256 | 284 | ||
1806 | 257 | 285 | ||
1808 | 258 | def parse_mappings(mappings): | 286 | def parse_mappings(mappings, key_rvalue=False): |
1809 | 287 | """By default mappings are lvalue keyed. | ||
1810 | 288 | |||
1811 | 289 | If key_rvalue is True, the mapping will be reversed to allow multiple | ||
1812 | 290 | configs for the same lvalue. | ||
1813 | 291 | """ | ||
1814 | 259 | parsed = {} | 292 | parsed = {} |
1815 | 260 | if mappings: | 293 | if mappings: |
1816 | 261 | mappings = mappings.split() | 294 | mappings = mappings.split() |
1817 | 262 | for m in mappings: | 295 | for m in mappings: |
1818 | 263 | p = m.partition(':') | 296 | p = m.partition(':') |
1822 | 264 | key = p[0].strip() | 297 | |
1823 | 265 | if p[1]: | 298 | if key_rvalue: |
1824 | 266 | parsed[key] = p[2].strip() | 299 | key_index = 2 |
1825 | 300 | val_index = 0 | ||
1826 | 301 | # if there is no rvalue skip to next | ||
1827 | 302 | if not p[1]: | ||
1828 | 303 | continue | ||
1829 | 267 | else: | 304 | else: |
1831 | 268 | parsed[key] = '' | 305 | key_index = 0 |
1832 | 306 | val_index = 2 | ||
1833 | 307 | |||
1834 | 308 | key = p[key_index].strip() | ||
1835 | 309 | parsed[key] = p[val_index].strip() | ||
1836 | 269 | 310 | ||
1837 | 270 | return parsed | 311 | return parsed |
1838 | 271 | 312 | ||
1839 | @@ -283,25 +324,25 @@ | |||
1840 | 283 | def parse_data_port_mappings(mappings, default_bridge='br-data'): | 324 | def parse_data_port_mappings(mappings, default_bridge='br-data'): |
1841 | 284 | """Parse data port mappings. | 325 | """Parse data port mappings. |
1842 | 285 | 326 | ||
1844 | 286 | Mappings must be a space-delimited list of bridge:port mappings. | 327 | Mappings must be a space-delimited list of bridge:port. |
1845 | 287 | 328 | ||
1847 | 288 | Returns dict of the form {bridge:port}. | 329 | Returns dict of the form {port:bridge} where ports may be mac addresses or |
1848 | 330 | interface names. | ||
1849 | 289 | """ | 331 | """ |
1851 | 290 | _mappings = parse_mappings(mappings) | 332 | |
1852 | 333 | # NOTE(dosaboy): we use rvalue for key to allow multiple values to be | ||
1853 | 334 | # proposed for <port> since it may be a mac address which will differ | ||
1854 | 335 | # across units this allowing first-known-good to be chosen. | ||
1855 | 336 | _mappings = parse_mappings(mappings, key_rvalue=True) | ||
1856 | 291 | if not _mappings or list(_mappings.values()) == ['']: | 337 | if not _mappings or list(_mappings.values()) == ['']: |
1857 | 292 | if not mappings: | 338 | if not mappings: |
1858 | 293 | return {} | 339 | return {} |
1859 | 294 | 340 | ||
1860 | 295 | # For backwards-compatibility we need to support port-only provided in | 341 | # For backwards-compatibility we need to support port-only provided in |
1861 | 296 | # config. | 342 | # config. |
1870 | 297 | _mappings = {default_bridge: mappings.split()[0]} | 343 | _mappings = {mappings.split()[0]: default_bridge} |
1871 | 298 | 344 | ||
1872 | 299 | bridges = _mappings.keys() | 345 | ports = _mappings.keys() |
1865 | 300 | ports = _mappings.values() | ||
1866 | 301 | if len(set(bridges)) != len(bridges): | ||
1867 | 302 | raise Exception("It is not allowed to have more than one port " | ||
1868 | 303 | "configured on the same bridge") | ||
1869 | 304 | |||
1873 | 305 | if len(set(ports)) != len(ports): | 346 | if len(set(ports)) != len(ports): |
1874 | 306 | raise Exception("It is not allowed to have the same port configured " | 347 | raise Exception("It is not allowed to have the same port configured " |
1875 | 307 | "on more than one bridge") | 348 | "on more than one bridge") |
1876 | 308 | 349 | ||
1877 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' | |||
1878 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-10 07:35:12 +0000 | |||
1879 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-12-01 15:05:49 +0000 | |||
1880 | @@ -5,11 +5,17 @@ | |||
1881 | 5 | ############################################################################### | 5 | ############################################################################### |
1882 | 6 | [global] | 6 | [global] |
1883 | 7 | {% if auth -%} | 7 | {% if auth -%} |
1887 | 8 | auth_supported = {{ auth }} | 8 | auth_supported = {{ auth }} |
1888 | 9 | keyring = /etc/ceph/$cluster.$name.keyring | 9 | keyring = /etc/ceph/$cluster.$name.keyring |
1889 | 10 | mon host = {{ mon_hosts }} | 10 | mon host = {{ mon_hosts }} |
1890 | 11 | {% endif -%} | 11 | {% endif -%} |
1894 | 12 | log to syslog = {{ use_syslog }} | 12 | log to syslog = {{ use_syslog }} |
1895 | 13 | err to syslog = {{ use_syslog }} | 13 | err to syslog = {{ use_syslog }} |
1896 | 14 | clog to syslog = {{ use_syslog }} | 14 | clog to syslog = {{ use_syslog }} |
1897 | 15 | 15 | ||
1898 | 16 | [client] | ||
1899 | 17 | {% if rbd_client_cache_settings -%} | ||
1900 | 18 | {% for key, value in rbd_client_cache_settings.iteritems() -%} | ||
1901 | 19 | {{ key }} = {{ value }} | ||
1902 | 20 | {% endfor -%} | ||
1903 | 21 | {%- endif %} | ||
1904 | 16 | \ No newline at end of file | 22 | \ No newline at end of file |
1905 | 17 | 23 | ||
1906 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
1907 | --- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-11 14:20:09 +0000 | |||
1908 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2015-12-01 15:05:49 +0000 | |||
1909 | @@ -18,7 +18,7 @@ | |||
1910 | 18 | 18 | ||
1911 | 19 | import six | 19 | import six |
1912 | 20 | 20 | ||
1914 | 21 | from charmhelpers.fetch import apt_install | 21 | from charmhelpers.fetch import apt_install, apt_update |
1915 | 22 | from charmhelpers.core.hookenv import ( | 22 | from charmhelpers.core.hookenv import ( |
1916 | 23 | log, | 23 | log, |
1917 | 24 | ERROR, | 24 | ERROR, |
1918 | @@ -29,39 +29,15 @@ | |||
1919 | 29 | try: | 29 | try: |
1920 | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
1921 | 31 | except ImportError: | 31 | except ImportError: |
1924 | 32 | # python-jinja2 may not be installed yet, or we're running unittests. | 32 | apt_update(fatal=True) |
1925 | 33 | FileSystemLoader = ChoiceLoader = Environment = exceptions = None | 33 | apt_install('python-jinja2', fatal=True) |
1926 | 34 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | ||
1927 | 34 | 35 | ||
1928 | 35 | 36 | ||
1929 | 36 | class OSConfigException(Exception): | 37 | class OSConfigException(Exception): |
1930 | 37 | pass | 38 | pass |
1931 | 38 | 39 | ||
1932 | 39 | 40 | ||
1933 | 40 | def os_template_dirs(templates_dir, os_release): | ||
1934 | 41 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | ||
1935 | 42 | for rel in six.itervalues(OPENSTACK_CODENAMES)] | ||
1936 | 43 | |||
1937 | 44 | if not os.path.isdir(templates_dir): | ||
1938 | 45 | log('Templates directory not found @ %s.' % templates_dir, | ||
1939 | 46 | level=ERROR) | ||
1940 | 47 | raise OSConfigException | ||
1941 | 48 | dirs = [templates_dir] | ||
1942 | 49 | helper_templates = os.path.join(os.path.dirname(__file__), 'templates') | ||
1943 | 50 | if os.path.isdir(helper_templates): | ||
1944 | 51 | dirs.append(helper_templates) | ||
1945 | 52 | |||
1946 | 53 | for rel, tmpl_dir in tmpl_dirs: | ||
1947 | 54 | if os.path.isdir(tmpl_dir): | ||
1948 | 55 | dirs.insert(0, tmpl_dir) | ||
1949 | 56 | if rel == os_release: | ||
1950 | 57 | break | ||
1951 | 58 | ch_templates = os.path.dirname(__file__) + '/charmhelpers/contrib/openstack/templates' | ||
1952 | 59 | dirs.append(ch_templates) | ||
1953 | 60 | log('Template search path: %s' % | ||
1954 | 61 | ' '.join(dirs), level=INFO) | ||
1955 | 62 | return dirs | ||
1956 | 63 | |||
1957 | 64 | |||
1958 | 65 | def get_loader(templates_dir, os_release): | 41 | def get_loader(templates_dir, os_release): |
1959 | 66 | """ | 42 | """ |
1960 | 67 | Create a jinja2.ChoiceLoader containing template dirs up to | 43 | Create a jinja2.ChoiceLoader containing template dirs up to |
1961 | @@ -137,7 +113,7 @@ | |||
1962 | 137 | 113 | ||
1963 | 138 | def complete_contexts(self): | 114 | def complete_contexts(self): |
1964 | 139 | ''' | 115 | ''' |
1966 | 140 | Return a list of interfaces that have atisfied contexts. | 116 | Return a list of interfaces that have satisfied contexts. |
1967 | 141 | ''' | 117 | ''' |
1968 | 142 | if self._complete_contexts: | 118 | if self._complete_contexts: |
1969 | 143 | return self._complete_contexts | 119 | return self._complete_contexts |
1970 | @@ -318,3 +294,30 @@ | |||
1971 | 318 | [interfaces.extend(i.complete_contexts()) | 294 | [interfaces.extend(i.complete_contexts()) |
1972 | 319 | for i in six.itervalues(self.templates)] | 295 | for i in six.itervalues(self.templates)] |
1973 | 320 | return interfaces | 296 | return interfaces |
1974 | 297 | |||
1975 | 298 | def get_incomplete_context_data(self, interfaces): | ||
1976 | 299 | ''' | ||
1977 | 300 | Return dictionary of relation status of interfaces and any missing | ||
1978 | 301 | required context data. Example: | ||
1979 | 302 | {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, | ||
1980 | 303 | 'zeromq-configuration': {'related': False}} | ||
1981 | 304 | ''' | ||
1982 | 305 | incomplete_context_data = {} | ||
1983 | 306 | |||
1984 | 307 | for i in six.itervalues(self.templates): | ||
1985 | 308 | for context in i.contexts: | ||
1986 | 309 | for interface in interfaces: | ||
1987 | 310 | related = False | ||
1988 | 311 | if interface in context.interfaces: | ||
1989 | 312 | related = context.get_related() | ||
1990 | 313 | missing_data = context.missing_data | ||
1991 | 314 | if missing_data: | ||
1992 | 315 | incomplete_context_data[interface] = {'missing_data': missing_data} | ||
1993 | 316 | if related: | ||
1994 | 317 | if incomplete_context_data.get(interface): | ||
1995 | 318 | incomplete_context_data[interface].update({'related': True}) | ||
1996 | 319 | else: | ||
1997 | 320 | incomplete_context_data[interface] = {'related': True} | ||
1998 | 321 | else: | ||
1999 | 322 | incomplete_context_data[interface] = {'related': False} | ||
2000 | 323 | return incomplete_context_data | ||
2001 | 321 | 324 | ||
2002 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
2003 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-17 12:23:31 +0000 | |||
2004 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-12-01 15:05:49 +0000 | |||
2005 | @@ -1,5 +1,3 @@ | |||
2006 | 1 | #!/usr/bin/python | ||
2007 | 2 | |||
2008 | 3 | # Copyright 2014-2015 Canonical Limited. | 1 | # Copyright 2014-2015 Canonical Limited. |
2009 | 4 | # | 2 | # |
2010 | 5 | # This file is part of charm-helpers. | 3 | # This file is part of charm-helpers. |
2011 | @@ -24,9 +22,11 @@ | |||
2012 | 24 | import json | 22 | import json |
2013 | 25 | import os | 23 | import os |
2014 | 26 | import sys | 24 | import sys |
2015 | 25 | import re | ||
2016 | 26 | |||
2017 | 27 | import six | ||
2018 | 28 | import traceback | ||
2019 | 27 | import uuid | 29 | import uuid |
2020 | 28 | |||
2021 | 29 | import six | ||
2022 | 30 | import yaml | 30 | import yaml |
2023 | 31 | 31 | ||
2024 | 32 | from charmhelpers.contrib.network import ip | 32 | from charmhelpers.contrib.network import ip |
2025 | @@ -36,13 +36,17 @@ | |||
2026 | 36 | ) | 36 | ) |
2027 | 37 | 37 | ||
2028 | 38 | from charmhelpers.core.hookenv import ( | 38 | from charmhelpers.core.hookenv import ( |
2029 | 39 | action_fail, | ||
2030 | 40 | action_set, | ||
2031 | 39 | config, | 41 | config, |
2032 | 40 | log as juju_log, | 42 | log as juju_log, |
2033 | 41 | charm_dir, | 43 | charm_dir, |
2034 | 42 | INFO, | 44 | INFO, |
2035 | 45 | related_units, | ||
2036 | 43 | relation_ids, | 46 | relation_ids, |
2037 | 44 | related_units, | ||
2038 | 45 | relation_set, | 47 | relation_set, |
2039 | 48 | status_set, | ||
2040 | 49 | hook_name | ||
2041 | 46 | ) | 50 | ) |
2042 | 47 | 51 | ||
2043 | 48 | from charmhelpers.contrib.storage.linux.lvm import ( | 52 | from charmhelpers.contrib.storage.linux.lvm import ( |
2044 | @@ -52,7 +56,8 @@ | |||
2045 | 52 | ) | 56 | ) |
2046 | 53 | 57 | ||
2047 | 54 | from charmhelpers.contrib.network.ip import ( | 58 | from charmhelpers.contrib.network.ip import ( |
2049 | 55 | get_ipv6_addr | 59 | get_ipv6_addr, |
2050 | 60 | is_ipv6, | ||
2051 | 56 | ) | 61 | ) |
2052 | 57 | 62 | ||
2053 | 58 | from charmhelpers.contrib.python.packages import ( | 63 | from charmhelpers.contrib.python.packages import ( |
2054 | @@ -71,7 +76,6 @@ | |||
2055 | 71 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | 76 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
2056 | 72 | 'restricted main multiverse universe') | 77 | 'restricted main multiverse universe') |
2057 | 73 | 78 | ||
2058 | 74 | |||
2059 | 75 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | 79 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
2060 | 76 | ('oneiric', 'diablo'), | 80 | ('oneiric', 'diablo'), |
2061 | 77 | ('precise', 'essex'), | 81 | ('precise', 'essex'), |
2062 | @@ -81,6 +85,7 @@ | |||
2063 | 81 | ('trusty', 'icehouse'), | 85 | ('trusty', 'icehouse'), |
2064 | 82 | ('utopic', 'juno'), | 86 | ('utopic', 'juno'), |
2065 | 83 | ('vivid', 'kilo'), | 87 | ('vivid', 'kilo'), |
2066 | 88 | ('wily', 'liberty'), | ||
2067 | 84 | ]) | 89 | ]) |
2068 | 85 | 90 | ||
2069 | 86 | 91 | ||
2070 | @@ -93,6 +98,7 @@ | |||
2071 | 93 | ('2014.1', 'icehouse'), | 98 | ('2014.1', 'icehouse'), |
2072 | 94 | ('2014.2', 'juno'), | 99 | ('2014.2', 'juno'), |
2073 | 95 | ('2015.1', 'kilo'), | 100 | ('2015.1', 'kilo'), |
2074 | 101 | ('2015.2', 'liberty'), | ||
2075 | 96 | ]) | 102 | ]) |
2076 | 97 | 103 | ||
2077 | 98 | # The ugly duckling | 104 | # The ugly duckling |
2078 | @@ -115,8 +121,42 @@ | |||
2079 | 115 | ('2.2.0', 'juno'), | 121 | ('2.2.0', 'juno'), |
2080 | 116 | ('2.2.1', 'kilo'), | 122 | ('2.2.1', 'kilo'), |
2081 | 117 | ('2.2.2', 'kilo'), | 123 | ('2.2.2', 'kilo'), |
2082 | 124 | ('2.3.0', 'liberty'), | ||
2083 | 125 | ('2.4.0', 'liberty'), | ||
2084 | 126 | ('2.5.0', 'liberty'), | ||
2085 | 118 | ]) | 127 | ]) |
2086 | 119 | 128 | ||
2087 | 129 | # >= Liberty version->codename mapping | ||
2088 | 130 | PACKAGE_CODENAMES = { | ||
2089 | 131 | 'nova-common': OrderedDict([ | ||
2090 | 132 | ('12.0.0', 'liberty'), | ||
2091 | 133 | ]), | ||
2092 | 134 | 'neutron-common': OrderedDict([ | ||
2093 | 135 | ('7.0.0', 'liberty'), | ||
2094 | 136 | ]), | ||
2095 | 137 | 'cinder-common': OrderedDict([ | ||
2096 | 138 | ('7.0.0', 'liberty'), | ||
2097 | 139 | ]), | ||
2098 | 140 | 'keystone': OrderedDict([ | ||
2099 | 141 | ('8.0.0', 'liberty'), | ||
2100 | 142 | ]), | ||
2101 | 143 | 'horizon-common': OrderedDict([ | ||
2102 | 144 | ('8.0.0', 'liberty'), | ||
2103 | 145 | ]), | ||
2104 | 146 | 'ceilometer-common': OrderedDict([ | ||
2105 | 147 | ('5.0.0', 'liberty'), | ||
2106 | 148 | ]), | ||
2107 | 149 | 'heat-common': OrderedDict([ | ||
2108 | 150 | ('5.0.0', 'liberty'), | ||
2109 | 151 | ]), | ||
2110 | 152 | 'glance-common': OrderedDict([ | ||
2111 | 153 | ('11.0.0', 'liberty'), | ||
2112 | 154 | ]), | ||
2113 | 155 | 'openstack-dashboard': OrderedDict([ | ||
2114 | 156 | ('8.0.0', 'liberty'), | ||
2115 | 157 | ]), | ||
2116 | 158 | } | ||
2117 | 159 | |||
2118 | 120 | DEFAULT_LOOPBACK_SIZE = '5G' | 160 | DEFAULT_LOOPBACK_SIZE = '5G' |
2119 | 121 | 161 | ||
2120 | 122 | 162 | ||
2121 | @@ -166,9 +206,9 @@ | |||
2122 | 166 | error_out(e) | 206 | error_out(e) |
2123 | 167 | 207 | ||
2124 | 168 | 208 | ||
2126 | 169 | def get_os_version_codename(codename): | 209 | def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): |
2127 | 170 | '''Determine OpenStack version number from codename.''' | 210 | '''Determine OpenStack version number from codename.''' |
2129 | 171 | for k, v in six.iteritems(OPENSTACK_CODENAMES): | 211 | for k, v in six.iteritems(version_map): |
2130 | 172 | if v == codename: | 212 | if v == codename: |
2131 | 173 | return k | 213 | return k |
2132 | 174 | e = 'Could not derive OpenStack version for '\ | 214 | e = 'Could not derive OpenStack version for '\ |
2133 | @@ -200,20 +240,31 @@ | |||
2134 | 200 | error_out(e) | 240 | error_out(e) |
2135 | 201 | 241 | ||
2136 | 202 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 242 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
2137 | 243 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) | ||
2138 | 244 | if match: | ||
2139 | 245 | vers = match.group(0) | ||
2140 | 203 | 246 | ||
2154 | 204 | try: | 247 | # >= Liberty independent project versions |
2155 | 205 | if 'swift' in pkg.name: | 248 | if (package in PACKAGE_CODENAMES and |
2156 | 206 | swift_vers = vers[:5] | 249 | vers in PACKAGE_CODENAMES[package]): |
2157 | 207 | if swift_vers not in SWIFT_CODENAMES: | 250 | return PACKAGE_CODENAMES[package][vers] |
2158 | 208 | # Deal with 1.10.0 upward | 251 | else: |
2159 | 209 | swift_vers = vers[:6] | 252 | # < Liberty co-ordinated project versions |
2160 | 210 | return SWIFT_CODENAMES[swift_vers] | 253 | try: |
2161 | 211 | else: | 254 | if 'swift' in pkg.name: |
2162 | 212 | vers = vers[:6] | 255 | swift_vers = vers[:5] |
2163 | 213 | return OPENSTACK_CODENAMES[vers] | 256 | if swift_vers not in SWIFT_CODENAMES: |
2164 | 214 | except KeyError: | 257 | # Deal with 1.10.0 upward |
2165 | 215 | e = 'Could not determine OpenStack codename for version %s' % vers | 258 | swift_vers = vers[:6] |
2166 | 216 | error_out(e) | 259 | return SWIFT_CODENAMES[swift_vers] |
2167 | 260 | else: | ||
2168 | 261 | vers = vers[:6] | ||
2169 | 262 | return OPENSTACK_CODENAMES[vers] | ||
2170 | 263 | except KeyError: | ||
2171 | 264 | if not fatal: | ||
2172 | 265 | return None | ||
2173 | 266 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
2174 | 267 | error_out(e) | ||
2175 | 217 | 268 | ||
2176 | 218 | 269 | ||
2177 | 219 | def get_os_version_package(pkg, fatal=True): | 270 | def get_os_version_package(pkg, fatal=True): |
2178 | @@ -323,6 +374,9 @@ | |||
2179 | 323 | 'kilo': 'trusty-updates/kilo', | 374 | 'kilo': 'trusty-updates/kilo', |
2180 | 324 | 'kilo/updates': 'trusty-updates/kilo', | 375 | 'kilo/updates': 'trusty-updates/kilo', |
2181 | 325 | 'kilo/proposed': 'trusty-proposed/kilo', | 376 | 'kilo/proposed': 'trusty-proposed/kilo', |
2182 | 377 | 'liberty': 'trusty-updates/liberty', | ||
2183 | 378 | 'liberty/updates': 'trusty-updates/liberty', | ||
2184 | 379 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
2185 | 326 | } | 380 | } |
2186 | 327 | 381 | ||
2187 | 328 | try: | 382 | try: |
2188 | @@ -388,7 +442,11 @@ | |||
2189 | 388 | import apt_pkg as apt | 442 | import apt_pkg as apt |
2190 | 389 | src = config('openstack-origin') | 443 | src = config('openstack-origin') |
2191 | 390 | cur_vers = get_os_version_package(package) | 444 | cur_vers = get_os_version_package(package) |
2193 | 391 | available_vers = get_os_version_install_source(src) | 445 | if "swift" in package: |
2194 | 446 | codename = get_os_codename_install_source(src) | ||
2195 | 447 | available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) | ||
2196 | 448 | else: | ||
2197 | 449 | available_vers = get_os_version_install_source(src) | ||
2198 | 392 | apt.init() | 450 | apt.init() |
2199 | 393 | return apt.version_compare(available_vers, cur_vers) == 1 | 451 | return apt.version_compare(available_vers, cur_vers) == 1 |
2200 | 394 | 452 | ||
2201 | @@ -465,6 +523,12 @@ | |||
2202 | 465 | relation_prefix=None): | 523 | relation_prefix=None): |
2203 | 466 | hosts = get_ipv6_addr(dynamic_only=False) | 524 | hosts = get_ipv6_addr(dynamic_only=False) |
2204 | 467 | 525 | ||
2205 | 526 | if config('vip'): | ||
2206 | 527 | vips = config('vip').split() | ||
2207 | 528 | for vip in vips: | ||
2208 | 529 | if vip and is_ipv6(vip): | ||
2209 | 530 | hosts.append(vip) | ||
2210 | 531 | |||
2211 | 468 | kwargs = {'database': database, | 532 | kwargs = {'database': database, |
2212 | 469 | 'username': database_user, | 533 | 'username': database_user, |
2213 | 470 | 'hostname': json.dumps(hosts)} | 534 | 'hostname': json.dumps(hosts)} |
2214 | @@ -518,6 +582,7 @@ | |||
2215 | 518 | Clone/install all specified OpenStack repositories. | 582 | Clone/install all specified OpenStack repositories. |
2216 | 519 | 583 | ||
2217 | 520 | The expected format of projects_yaml is: | 584 | The expected format of projects_yaml is: |
2218 | 585 | |||
2219 | 521 | repositories: | 586 | repositories: |
2220 | 522 | - {name: keystone, | 587 | - {name: keystone, |
2221 | 523 | repository: 'git://git.openstack.org/openstack/keystone.git', | 588 | repository: 'git://git.openstack.org/openstack/keystone.git', |
2222 | @@ -525,11 +590,13 @@ | |||
2223 | 525 | - {name: requirements, | 590 | - {name: requirements, |
2224 | 526 | repository: 'git://git.openstack.org/openstack/requirements.git', | 591 | repository: 'git://git.openstack.org/openstack/requirements.git', |
2225 | 527 | branch: 'stable/icehouse'} | 592 | branch: 'stable/icehouse'} |
2226 | 593 | |||
2227 | 528 | directory: /mnt/openstack-git | 594 | directory: /mnt/openstack-git |
2228 | 529 | http_proxy: squid-proxy-url | 595 | http_proxy: squid-proxy-url |
2229 | 530 | https_proxy: squid-proxy-url | 596 | https_proxy: squid-proxy-url |
2230 | 531 | 597 | ||
2232 | 532 | The directory, http_proxy, and https_proxy keys are optional. | 598 | The directory, http_proxy, and https_proxy keys are optional. |
2233 | 599 | |||
2234 | 533 | """ | 600 | """ |
2235 | 534 | global requirements_dir | 601 | global requirements_dir |
2236 | 535 | parent_dir = '/mnt/openstack-git' | 602 | parent_dir = '/mnt/openstack-git' |
2237 | @@ -551,6 +618,12 @@ | |||
2238 | 551 | 618 | ||
2239 | 552 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) | 619 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
2240 | 553 | 620 | ||
2241 | 621 | # Upgrade setuptools and pip from default virtualenv versions. The default | ||
2242 | 622 | # versions in trusty break master OpenStack branch deployments. | ||
2243 | 623 | for p in ['pip', 'setuptools']: | ||
2244 | 624 | pip_install(p, upgrade=True, proxy=http_proxy, | ||
2245 | 625 | venv=os.path.join(parent_dir, 'venv')) | ||
2246 | 626 | |||
2247 | 554 | for p in projects['repositories']: | 627 | for p in projects['repositories']: |
2248 | 555 | repo = p['repository'] | 628 | repo = p['repository'] |
2249 | 556 | branch = p['branch'] | 629 | branch = p['branch'] |
2250 | @@ -612,24 +685,24 @@ | |||
2251 | 612 | else: | 685 | else: |
2252 | 613 | repo_dir = dest_dir | 686 | repo_dir = dest_dir |
2253 | 614 | 687 | ||
2254 | 688 | venv = os.path.join(parent_dir, 'venv') | ||
2255 | 689 | |||
2256 | 615 | if update_requirements: | 690 | if update_requirements: |
2257 | 616 | if not requirements_dir: | 691 | if not requirements_dir: |
2258 | 617 | error_out('requirements repo must be cloned before ' | 692 | error_out('requirements repo must be cloned before ' |
2259 | 618 | 'updating from global requirements.') | 693 | 'updating from global requirements.') |
2261 | 619 | _git_update_requirements(repo_dir, requirements_dir) | 694 | _git_update_requirements(venv, repo_dir, requirements_dir) |
2262 | 620 | 695 | ||
2263 | 621 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) | 696 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
2264 | 622 | if http_proxy: | 697 | if http_proxy: |
2267 | 623 | pip_install(repo_dir, proxy=http_proxy, | 698 | pip_install(repo_dir, proxy=http_proxy, venv=venv) |
2266 | 624 | venv=os.path.join(parent_dir, 'venv')) | ||
2268 | 625 | else: | 699 | else: |
2271 | 626 | pip_install(repo_dir, | 700 | pip_install(repo_dir, venv=venv) |
2270 | 627 | venv=os.path.join(parent_dir, 'venv')) | ||
2272 | 628 | 701 | ||
2273 | 629 | return repo_dir | 702 | return repo_dir |
2274 | 630 | 703 | ||
2275 | 631 | 704 | ||
2277 | 632 | def _git_update_requirements(package_dir, reqs_dir): | 705 | def _git_update_requirements(venv, package_dir, reqs_dir): |
2278 | 633 | """ | 706 | """ |
2279 | 634 | Update from global requirements. | 707 | Update from global requirements. |
2280 | 635 | 708 | ||
2281 | @@ -638,12 +711,14 @@ | |||
2282 | 638 | """ | 711 | """ |
2283 | 639 | orig_dir = os.getcwd() | 712 | orig_dir = os.getcwd() |
2284 | 640 | os.chdir(reqs_dir) | 713 | os.chdir(reqs_dir) |
2286 | 641 | cmd = ['python', 'update.py', package_dir] | 714 | python = os.path.join(venv, 'bin/python') |
2287 | 715 | cmd = [python, 'update.py', package_dir] | ||
2288 | 642 | try: | 716 | try: |
2289 | 643 | subprocess.check_call(cmd) | 717 | subprocess.check_call(cmd) |
2290 | 644 | except subprocess.CalledProcessError: | 718 | except subprocess.CalledProcessError: |
2291 | 645 | package = os.path.basename(package_dir) | 719 | package = os.path.basename(package_dir) |
2293 | 646 | error_out("Error updating {} from global-requirements.txt".format(package)) | 720 | error_out("Error updating {} from " |
2294 | 721 | "global-requirements.txt".format(package)) | ||
2295 | 647 | os.chdir(orig_dir) | 722 | os.chdir(orig_dir) |
2296 | 648 | 723 | ||
2297 | 649 | 724 | ||
2298 | @@ -691,6 +766,222 @@ | |||
2299 | 691 | return None | 766 | return None |
2300 | 692 | 767 | ||
2301 | 693 | 768 | ||
2302 | 769 | def os_workload_status(configs, required_interfaces, charm_func=None): | ||
2303 | 770 | """ | ||
2304 | 771 | Decorator to set workload status based on complete contexts | ||
2305 | 772 | """ | ||
2306 | 773 | def wrap(f): | ||
2307 | 774 | @wraps(f) | ||
2308 | 775 | def wrapped_f(*args, **kwargs): | ||
2309 | 776 | # Run the original function first | ||
2310 | 777 | f(*args, **kwargs) | ||
2311 | 778 | # Set workload status now that contexts have been | ||
2312 | 779 | # acted on | ||
2313 | 780 | set_os_workload_status(configs, required_interfaces, charm_func) | ||
2314 | 781 | return wrapped_f | ||
2315 | 782 | return wrap | ||
2316 | 783 | |||
2317 | 784 | |||
2318 | 785 | def set_os_workload_status(configs, required_interfaces, charm_func=None): | ||
2319 | 786 | """ | ||
2320 | 787 | Set workload status based on complete contexts. | ||
2321 | 788 | status-set missing or incomplete contexts | ||
2322 | 789 | and juju-log details of missing required data. | ||
2323 | 790 | charm_func is a charm specific function to run checking | ||
2324 | 791 | for charm specific requirements such as a VIP setting. | ||
2325 | 792 | """ | ||
2326 | 793 | incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) | ||
2327 | 794 | state = 'active' | ||
2328 | 795 | missing_relations = [] | ||
2329 | 796 | incomplete_relations = [] | ||
2330 | 797 | message = None | ||
2331 | 798 | charm_state = None | ||
2332 | 799 | charm_message = None | ||
2333 | 800 | |||
2334 | 801 | for generic_interface in incomplete_rel_data.keys(): | ||
2335 | 802 | related_interface = None | ||
2336 | 803 | missing_data = {} | ||
2337 | 804 | # Related or not? | ||
2338 | 805 | for interface in incomplete_rel_data[generic_interface]: | ||
2339 | 806 | if incomplete_rel_data[generic_interface][interface].get('related'): | ||
2340 | 807 | related_interface = interface | ||
2341 | 808 | missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') | ||
2342 | 809 | # No relation ID for the generic_interface | ||
2343 | 810 | if not related_interface: | ||
2344 | 811 | juju_log("{} relation is missing and must be related for " | ||
2345 | 812 | "functionality. ".format(generic_interface), 'WARN') | ||
2346 | 813 | state = 'blocked' | ||
2347 | 814 | if generic_interface not in missing_relations: | ||
2348 | 815 | missing_relations.append(generic_interface) | ||
2349 | 816 | else: | ||
2350 | 817 | # Relation ID exists but no related unit | ||
2351 | 818 | if not missing_data: | ||
2352 | 819 | # Edge case relation ID exists but departing | ||
2353 | 820 | if ('departed' in hook_name() or 'broken' in hook_name()) \ | ||
2354 | 821 | and related_interface in hook_name(): | ||
2355 | 822 | state = 'blocked' | ||
2356 | 823 | if generic_interface not in missing_relations: | ||
2357 | 824 | missing_relations.append(generic_interface) | ||
2358 | 825 | juju_log("{} relation's interface, {}, " | ||
2359 | 826 | "relationship is departed or broken " | ||
2360 | 827 | "and is required for functionality." | ||
2361 | 828 | "".format(generic_interface, related_interface), "WARN") | ||
2362 | 829 | # Normal case relation ID exists but no related unit | ||
2363 | 830 | # (joining) | ||
2364 | 831 | else: | ||
2365 | 832 | juju_log("{} relations's interface, {}, is related but has " | ||
2366 | 833 | "no units in the relation." | ||
2367 | 834 | "".format(generic_interface, related_interface), "INFO") | ||
2368 | 835 | # Related unit exists and data missing on the relation | ||
2369 | 836 | else: | ||
2370 | 837 | juju_log("{} relation's interface, {}, is related awaiting " | ||
2371 | 838 | "the following data from the relationship: {}. " | ||
2372 | 839 | "".format(generic_interface, related_interface, | ||
2373 | 840 | ", ".join(missing_data)), "INFO") | ||
2374 | 841 | if state != 'blocked': | ||
2375 | 842 | state = 'waiting' | ||
2376 | 843 | if generic_interface not in incomplete_relations \ | ||
2377 | 844 | and generic_interface not in missing_relations: | ||
2378 | 845 | incomplete_relations.append(generic_interface) | ||
2379 | 846 | |||
2380 | 847 | if missing_relations: | ||
2381 | 848 | message = "Missing relations: {}".format(", ".join(missing_relations)) | ||
2382 | 849 | if incomplete_relations: | ||
2383 | 850 | message += "; incomplete relations: {}" \ | ||
2384 | 851 | "".format(", ".join(incomplete_relations)) | ||
2385 | 852 | state = 'blocked' | ||
2386 | 853 | elif incomplete_relations: | ||
2387 | 854 | message = "Incomplete relations: {}" \ | ||
2388 | 855 | "".format(", ".join(incomplete_relations)) | ||
2389 | 856 | state = 'waiting' | ||
2390 | 857 | |||
2391 | 858 | # Run charm specific checks | ||
2392 | 859 | if charm_func: | ||
2393 | 860 | charm_state, charm_message = charm_func(configs) | ||
2394 | 861 | if charm_state != 'active' and charm_state != 'unknown': | ||
2395 | 862 | state = workload_state_compare(state, charm_state) | ||
2396 | 863 | if message: | ||
2397 | 864 | charm_message = charm_message.replace("Incomplete relations: ", | ||
2398 | 865 | "") | ||
2399 | 866 | message = "{}, {}".format(message, charm_message) | ||
2400 | 867 | else: | ||
2401 | 868 | message = charm_message | ||
2402 | 869 | |||
2403 | 870 | # Set to active if all requirements have been met | ||
2404 | 871 | if state == 'active': | ||
2405 | 872 | message = "Unit is ready" | ||
2406 | 873 | juju_log(message, "INFO") | ||
2407 | 874 | |||
2408 | 875 | status_set(state, message) | ||
2409 | 876 | |||
2410 | 877 | |||
2411 | 878 | def workload_state_compare(current_workload_state, workload_state): | ||
2412 | 879 | """ Return highest priority of two states""" | ||
2413 | 880 | hierarchy = {'unknown': -1, | ||
2414 | 881 | 'active': 0, | ||
2415 | 882 | 'maintenance': 1, | ||
2416 | 883 | 'waiting': 2, | ||
2417 | 884 | 'blocked': 3, | ||
2418 | 885 | } | ||
2419 | 886 | |||
2420 | 887 | if hierarchy.get(workload_state) is None: | ||
2421 | 888 | workload_state = 'unknown' | ||
2422 | 889 | if hierarchy.get(current_workload_state) is None: | ||
2423 | 890 | current_workload_state = 'unknown' | ||
2424 | 891 | |||
2425 | 892 | # Set workload_state based on hierarchy of statuses | ||
2426 | 893 | if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): | ||
2427 | 894 | return current_workload_state | ||
2428 | 895 | else: | ||
2429 | 896 | return workload_state | ||
2430 | 897 | |||
2431 | 898 | |||
2432 | 899 | def incomplete_relation_data(configs, required_interfaces): | ||
2433 | 900 | """ | ||
2434 | 901 | Check complete contexts against required_interfaces | ||
2435 | 902 | Return dictionary of incomplete relation data. | ||
2436 | 903 | |||
2437 | 904 | configs is an OSConfigRenderer object with configs registered | ||
2438 | 905 | |||
2439 | 906 | required_interfaces is a dictionary of required general interfaces | ||
2440 | 907 | with dictionary values of possible specific interfaces. | ||
2441 | 908 | Example: | ||
2442 | 909 | required_interfaces = {'database': ['shared-db', 'pgsql-db']} | ||
2443 | 910 | |||
2444 | 911 | The interface is said to be satisfied if any one of the interfaces in the | ||
2445 | 912 | list has a complete context. | ||
2446 | 913 | |||
2447 | 914 | Return dictionary of incomplete or missing required contexts with relation | ||
2448 | 915 | status of interfaces and any missing data points. Example: | ||
2449 | 916 | {'message': | ||
2450 | 917 | {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, | ||
2451 | 918 | 'zeromq-configuration': {'related': False}}, | ||
2452 | 919 | 'identity': | ||
2453 | 920 | {'identity-service': {'related': False}}, | ||
2454 | 921 | 'database': | ||
2455 | 922 | {'pgsql-db': {'related': False}, | ||
2456 | 923 | 'shared-db': {'related': True}}} | ||
2457 | 924 | """ | ||
2458 | 925 | complete_ctxts = configs.complete_contexts() | ||
2459 | 926 | incomplete_relations = [] | ||
2460 | 927 | for svc_type in required_interfaces.keys(): | ||
2461 | 928 | # Avoid duplicates | ||
2462 | 929 | found_ctxt = False | ||
2463 | 930 | for interface in required_interfaces[svc_type]: | ||
2464 | 931 | if interface in complete_ctxts: | ||
2465 | 932 | found_ctxt = True | ||
2466 | 933 | if not found_ctxt: | ||
2467 | 934 | incomplete_relations.append(svc_type) | ||
2468 | 935 | incomplete_context_data = {} | ||
2469 | 936 | for i in incomplete_relations: | ||
2470 | 937 | incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) | ||
2471 | 938 | return incomplete_context_data | ||
2472 | 939 | |||
2473 | 940 | |||
2474 | 941 | def do_action_openstack_upgrade(package, upgrade_callback, configs): | ||
2475 | 942 | """Perform action-managed OpenStack upgrade. | ||
2476 | 943 | |||
2477 | 944 | Upgrades packages to the configured openstack-origin version and sets | ||
2478 | 945 | the corresponding action status as a result. | ||
2479 | 946 | |||
2480 | 947 | If the charm was installed from source we cannot upgrade it. | ||
2481 | 948 | For backwards compatibility a config flag (action-managed-upgrade) must | ||
2482 | 949 | be set for this code to run, otherwise a full service level upgrade will | ||
2483 | 950 | fire on config-changed. | ||
2484 | 951 | |||
2485 | 952 | @param package: package name for determining if upgrade available | ||
2486 | 953 | @param upgrade_callback: function callback to charm's upgrade function | ||
2487 | 954 | @param configs: templating object derived from OSConfigRenderer class | ||
2488 | 955 | |||
2489 | 956 | @return: True if upgrade successful; False if upgrade failed or skipped | ||
2490 | 957 | """ | ||
2491 | 958 | ret = False | ||
2492 | 959 | |||
2493 | 960 | if git_install_requested(): | ||
2494 | 961 | action_set({'outcome': 'installed from source, skipped upgrade.'}) | ||
2495 | 962 | else: | ||
2496 | 963 | if openstack_upgrade_available(package): | ||
2497 | 964 | if config('action-managed-upgrade'): | ||
2498 | 965 | juju_log('Upgrading OpenStack release') | ||
2499 | 966 | |||
2500 | 967 | try: | ||
2501 | 968 | upgrade_callback(configs=configs) | ||
2502 | 969 | action_set({'outcome': 'success, upgrade completed.'}) | ||
2503 | 970 | ret = True | ||
2504 | 971 | except: | ||
2505 | 972 | action_set({'outcome': 'upgrade failed, see traceback.'}) | ||
2506 | 973 | action_set({'traceback': traceback.format_exc()}) | ||
2507 | 974 | action_fail('do_openstack_upgrade resulted in an ' | ||
2508 | 975 | 'unexpected error') | ||
2509 | 976 | else: | ||
2510 | 977 | action_set({'outcome': 'action-managed-upgrade config is ' | ||
2511 | 978 | 'False, skipped upgrade.'}) | ||
2512 | 979 | else: | ||
2513 | 980 | action_set({'outcome': 'no upgrade available.'}) | ||
2514 | 981 | |||
2515 | 982 | return ret | ||
2516 | 983 | |||
2517 | 984 | |||
2518 | 694 | def remote_restart(rel_name, remote_service=None): | 985 | def remote_restart(rel_name, remote_service=None): |
2519 | 695 | trigger = { | 986 | trigger = { |
2520 | 696 | 'restart-trigger': str(uuid.uuid4()), | 987 | 'restart-trigger': str(uuid.uuid4()), |
2521 | @@ -700,7 +991,7 @@ | |||
2522 | 700 | for rid in relation_ids(rel_name): | 991 | for rid in relation_ids(rel_name): |
2523 | 701 | # This subordinate can be related to two seperate services using | 992 | # This subordinate can be related to two seperate services using |
2524 | 702 | # different subordinate relations so only issue the restart if | 993 | # different subordinate relations so only issue the restart if |
2526 | 703 | # thr principle is conencted down the relation we think it is | 994 | # the principle is conencted down the relation we think it is |
2527 | 704 | if related_units(relid=rid): | 995 | if related_units(relid=rid): |
2528 | 705 | relation_set(relation_id=rid, | 996 | relation_set(relation_id=rid, |
2529 | 706 | relation_settings=trigger, | 997 | relation_settings=trigger, |
2530 | 707 | 998 | ||
2531 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' | |||
2532 | --- hooks/charmhelpers/contrib/python/packages.py 2015-06-10 15:45:48 +0000 | |||
2533 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-12-01 15:05:49 +0000 | |||
2534 | @@ -36,6 +36,8 @@ | |||
2535 | 36 | def parse_options(given, available): | 36 | def parse_options(given, available): |
2536 | 37 | """Given a set of options, check if available""" | 37 | """Given a set of options, check if available""" |
2537 | 38 | for key, value in sorted(given.items()): | 38 | for key, value in sorted(given.items()): |
2538 | 39 | if not value: | ||
2539 | 40 | continue | ||
2540 | 39 | if key in available: | 41 | if key in available: |
2541 | 40 | yield "--{0}={1}".format(key, value) | 42 | yield "--{0}={1}".format(key, value) |
2542 | 41 | 43 | ||
2543 | 42 | 44 | ||
2544 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
2545 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-10 15:45:48 +0000 | |||
2546 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-12-01 15:05:49 +0000 | |||
2547 | @@ -26,8 +26,10 @@ | |||
2548 | 26 | 26 | ||
2549 | 27 | import os | 27 | import os |
2550 | 28 | import shutil | 28 | import shutil |
2551 | 29 | import six | ||
2552 | 29 | import json | 30 | import json |
2553 | 30 | import time | 31 | import time |
2554 | 32 | import uuid | ||
2555 | 31 | 33 | ||
2556 | 32 | from subprocess import ( | 34 | from subprocess import ( |
2557 | 33 | check_call, | 35 | check_call, |
2558 | @@ -35,8 +37,10 @@ | |||
2559 | 35 | CalledProcessError, | 37 | CalledProcessError, |
2560 | 36 | ) | 38 | ) |
2561 | 37 | from charmhelpers.core.hookenv import ( | 39 | from charmhelpers.core.hookenv import ( |
2562 | 40 | local_unit, | ||
2563 | 38 | relation_get, | 41 | relation_get, |
2564 | 39 | relation_ids, | 42 | relation_ids, |
2565 | 43 | relation_set, | ||
2566 | 40 | related_units, | 44 | related_units, |
2567 | 41 | log, | 45 | log, |
2568 | 42 | DEBUG, | 46 | DEBUG, |
2569 | @@ -56,16 +60,18 @@ | |||
2570 | 56 | apt_install, | 60 | apt_install, |
2571 | 57 | ) | 61 | ) |
2572 | 58 | 62 | ||
2573 | 63 | from charmhelpers.core.kernel import modprobe | ||
2574 | 64 | |||
2575 | 59 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' | 65 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' |
2576 | 60 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | 66 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
2577 | 61 | 67 | ||
2578 | 62 | CEPH_CONF = """[global] | 68 | CEPH_CONF = """[global] |
2585 | 63 | auth supported = {auth} | 69 | auth supported = {auth} |
2586 | 64 | keyring = {keyring} | 70 | keyring = {keyring} |
2587 | 65 | mon host = {mon_hosts} | 71 | mon host = {mon_hosts} |
2588 | 66 | log to syslog = {use_syslog} | 72 | log to syslog = {use_syslog} |
2589 | 67 | err to syslog = {use_syslog} | 73 | err to syslog = {use_syslog} |
2590 | 68 | clog to syslog = {use_syslog} | 74 | clog to syslog = {use_syslog} |
2591 | 69 | """ | 75 | """ |
2592 | 70 | 76 | ||
2593 | 71 | 77 | ||
2594 | @@ -120,29 +126,37 @@ | |||
2595 | 120 | return None | 126 | return None |
2596 | 121 | 127 | ||
2597 | 122 | 128 | ||
2599 | 123 | def create_pool(service, name, replicas=3): | 129 | def update_pool(client, pool, settings): |
2600 | 130 | cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] | ||
2601 | 131 | for k, v in six.iteritems(settings): | ||
2602 | 132 | cmd.append(k) | ||
2603 | 133 | cmd.append(v) | ||
2604 | 134 | |||
2605 | 135 | check_call(cmd) | ||
2606 | 136 | |||
2607 | 137 | |||
2608 | 138 | def create_pool(service, name, replicas=3, pg_num=None): | ||
2609 | 124 | """Create a new RADOS pool.""" | 139 | """Create a new RADOS pool.""" |
2610 | 125 | if pool_exists(service, name): | 140 | if pool_exists(service, name): |
2611 | 126 | log("Ceph pool {} already exists, skipping creation".format(name), | 141 | log("Ceph pool {} already exists, skipping creation".format(name), |
2612 | 127 | level=WARNING) | 142 | level=WARNING) |
2613 | 128 | return | 143 | return |
2614 | 129 | 144 | ||
2631 | 130 | # Calculate the number of placement groups based | 145 | if not pg_num: |
2632 | 131 | # on upstream recommended best practices. | 146 | # Calculate the number of placement groups based |
2633 | 132 | osds = get_osds(service) | 147 | # on upstream recommended best practices. |
2634 | 133 | if osds: | 148 | osds = get_osds(service) |
2635 | 134 | pgnum = (len(osds) * 100 // replicas) | 149 | if osds: |
2636 | 135 | else: | 150 | pg_num = (len(osds) * 100 // replicas) |
2637 | 136 | # NOTE(james-page): Default to 200 for older ceph versions | 151 | else: |
2638 | 137 | # which don't support OSD query from cli | 152 | # NOTE(james-page): Default to 200 for older ceph versions |
2639 | 138 | pgnum = 200 | 153 | # which don't support OSD query from cli |
2640 | 139 | 154 | pg_num = 200 | |
2641 | 140 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] | 155 | |
2642 | 141 | check_call(cmd) | 156 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] |
2643 | 142 | 157 | check_call(cmd) | |
2644 | 143 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', | 158 | |
2645 | 144 | str(replicas)] | 159 | update_pool(service, name, settings={'size': str(replicas)}) |
2630 | 145 | check_call(cmd) | ||
2646 | 146 | 160 | ||
2647 | 147 | 161 | ||
2648 | 148 | def delete_pool(service, name): | 162 | def delete_pool(service, name): |
2649 | @@ -197,10 +211,10 @@ | |||
2650 | 197 | log('Created new keyfile at %s.' % keyfile, level=INFO) | 211 | log('Created new keyfile at %s.' % keyfile, level=INFO) |
2651 | 198 | 212 | ||
2652 | 199 | 213 | ||
2655 | 200 | def get_ceph_nodes(): | 214 | def get_ceph_nodes(relation='ceph'): |
2656 | 201 | """Query named relation 'ceph' to determine current nodes.""" | 215 | """Query named relation to determine current nodes.""" |
2657 | 202 | hosts = [] | 216 | hosts = [] |
2659 | 203 | for r_id in relation_ids('ceph'): | 217 | for r_id in relation_ids(relation): |
2660 | 204 | for unit in related_units(r_id): | 218 | for unit in related_units(r_id): |
2661 | 205 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | 219 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
2662 | 206 | 220 | ||
2663 | @@ -288,17 +302,6 @@ | |||
2664 | 288 | os.chown(data_src_dst, uid, gid) | 302 | os.chown(data_src_dst, uid, gid) |
2665 | 289 | 303 | ||
2666 | 290 | 304 | ||
2667 | 291 | # TODO: re-use | ||
2668 | 292 | def modprobe(module): | ||
2669 | 293 | """Load a kernel module and configure for auto-load on reboot.""" | ||
2670 | 294 | log('Loading kernel module', level=INFO) | ||
2671 | 295 | cmd = ['modprobe', module] | ||
2672 | 296 | check_call(cmd) | ||
2673 | 297 | with open('/etc/modules', 'r+') as modules: | ||
2674 | 298 | if module not in modules.read(): | ||
2675 | 299 | modules.write(module) | ||
2676 | 300 | |||
2677 | 301 | |||
2678 | 302 | def copy_files(src, dst, symlinks=False, ignore=None): | 305 | def copy_files(src, dst, symlinks=False, ignore=None): |
2679 | 303 | """Copy files from src to dst.""" | 306 | """Copy files from src to dst.""" |
2680 | 304 | for item in os.listdir(src): | 307 | for item in os.listdir(src): |
2681 | @@ -363,14 +366,14 @@ | |||
2682 | 363 | service_start(svc) | 366 | service_start(svc) |
2683 | 364 | 367 | ||
2684 | 365 | 368 | ||
2686 | 366 | def ensure_ceph_keyring(service, user=None, group=None): | 369 | def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): |
2687 | 367 | """Ensures a ceph keyring is created for a named service and optionally | 370 | """Ensures a ceph keyring is created for a named service and optionally |
2688 | 368 | ensures user and group ownership. | 371 | ensures user and group ownership. |
2689 | 369 | 372 | ||
2690 | 370 | Returns False if no ceph key is available in relation state. | 373 | Returns False if no ceph key is available in relation state. |
2691 | 371 | """ | 374 | """ |
2692 | 372 | key = None | 375 | key = None |
2694 | 373 | for rid in relation_ids('ceph'): | 376 | for rid in relation_ids(relation): |
2695 | 374 | for unit in related_units(rid): | 377 | for unit in related_units(rid): |
2696 | 375 | key = relation_get('key', rid=rid, unit=unit) | 378 | key = relation_get('key', rid=rid, unit=unit) |
2697 | 376 | if key: | 379 | if key: |
2698 | @@ -411,17 +414,59 @@ | |||
2699 | 411 | 414 | ||
2700 | 412 | The API is versioned and defaults to version 1. | 415 | The API is versioned and defaults to version 1. |
2701 | 413 | """ | 416 | """ |
2703 | 414 | def __init__(self, api_version=1): | 417 | def __init__(self, api_version=1, request_id=None): |
2704 | 415 | self.api_version = api_version | 418 | self.api_version = api_version |
2705 | 419 | if request_id: | ||
2706 | 420 | self.request_id = request_id | ||
2707 | 421 | else: | ||
2708 | 422 | self.request_id = str(uuid.uuid1()) | ||
2709 | 416 | self.ops = [] | 423 | self.ops = [] |
2710 | 417 | 424 | ||
2712 | 418 | def add_op_create_pool(self, name, replica_count=3): | 425 | def add_op_create_pool(self, name, replica_count=3, pg_num=None): |
2713 | 426 | """Adds an operation to create a pool. | ||
2714 | 427 | |||
2715 | 428 | @param pg_num setting: optional setting. If not provided, this value | ||
2716 | 429 | will be calculated by the broker based on how many OSDs are in the | ||
2717 | 430 | cluster at the time of creation. Note that, if provided, this value | ||
2718 | 431 | will be capped at the current available maximum. | ||
2719 | 432 | """ | ||
2720 | 419 | self.ops.append({'op': 'create-pool', 'name': name, | 433 | self.ops.append({'op': 'create-pool', 'name': name, |
2722 | 420 | 'replicas': replica_count}) | 434 | 'replicas': replica_count, 'pg_num': pg_num}) |
2723 | 435 | |||
2724 | 436 | def set_ops(self, ops): | ||
2725 | 437 | """Set request ops to provided value. | ||
2726 | 438 | |||
2727 | 439 | Useful for injecting ops that come from a previous request | ||
2728 | 440 | to allow comparisons to ensure validity. | ||
2729 | 441 | """ | ||
2730 | 442 | self.ops = ops | ||
2731 | 421 | 443 | ||
2732 | 422 | @property | 444 | @property |
2733 | 423 | def request(self): | 445 | def request(self): |
2735 | 424 | return json.dumps({'api-version': self.api_version, 'ops': self.ops}) | 446 | return json.dumps({'api-version': self.api_version, 'ops': self.ops, |
2736 | 447 | 'request-id': self.request_id}) | ||
2737 | 448 | |||
2738 | 449 | def _ops_equal(self, other): | ||
2739 | 450 | if len(self.ops) == len(other.ops): | ||
2740 | 451 | for req_no in range(0, len(self.ops)): | ||
2741 | 452 | for key in ['replicas', 'name', 'op', 'pg_num']: | ||
2742 | 453 | if self.ops[req_no].get(key) != other.ops[req_no].get(key): | ||
2743 | 454 | return False | ||
2744 | 455 | else: | ||
2745 | 456 | return False | ||
2746 | 457 | return True | ||
2747 | 458 | |||
2748 | 459 | def __eq__(self, other): | ||
2749 | 460 | if not isinstance(other, self.__class__): | ||
2750 | 461 | return False | ||
2751 | 462 | if self.api_version == other.api_version and \ | ||
2752 | 463 | self._ops_equal(other): | ||
2753 | 464 | return True | ||
2754 | 465 | else: | ||
2755 | 466 | return False | ||
2756 | 467 | |||
2757 | 468 | def __ne__(self, other): | ||
2758 | 469 | return not self.__eq__(other) | ||
2759 | 425 | 470 | ||
2760 | 426 | 471 | ||
2761 | 427 | class CephBrokerRsp(object): | 472 | class CephBrokerRsp(object): |
2762 | @@ -431,14 +476,198 @@ | |||
2763 | 431 | 476 | ||
2764 | 432 | The API is versioned and defaults to version 1. | 477 | The API is versioned and defaults to version 1. |
2765 | 433 | """ | 478 | """ |
2766 | 479 | |||
2767 | 434 | def __init__(self, encoded_rsp): | 480 | def __init__(self, encoded_rsp): |
2768 | 435 | self.api_version = None | 481 | self.api_version = None |
2769 | 436 | self.rsp = json.loads(encoded_rsp) | 482 | self.rsp = json.loads(encoded_rsp) |
2770 | 437 | 483 | ||
2771 | 438 | @property | 484 | @property |
2772 | 485 | def request_id(self): | ||
2773 | 486 | return self.rsp.get('request-id') | ||
2774 | 487 | |||
2775 | 488 | @property | ||
2776 | 439 | def exit_code(self): | 489 | def exit_code(self): |
2777 | 440 | return self.rsp.get('exit-code') | 490 | return self.rsp.get('exit-code') |
2778 | 441 | 491 | ||
2779 | 442 | @property | 492 | @property |
2780 | 443 | def exit_msg(self): | 493 | def exit_msg(self): |
2781 | 444 | return self.rsp.get('stderr') | 494 | return self.rsp.get('stderr') |
2782 | 495 | |||
2783 | 496 | |||
2784 | 497 | # Ceph Broker Conversation: | ||
2785 | 498 | # If a charm needs an action to be taken by ceph it can create a CephBrokerRq | ||
2786 | 499 | # and send that request to ceph via the ceph relation. The CephBrokerRq has a | ||
2787 | 500 | # unique id so that the client can identify which CephBrokerRsp is associated | ||
2788 | 501 | # with the request. Ceph will also respond to each client unit individually | ||
2789 | 502 | # creating a response key per client unit eg glance/0 will get a CephBrokerRsp | ||
2790 | 503 | # via key broker-rsp-glance-0 | ||
2791 | 504 | # | ||
2792 | 505 | # To use this the charm can just do something like: | ||
2793 | 506 | # | ||
2794 | 507 | # from charmhelpers.contrib.storage.linux.ceph import ( | ||
2795 | 508 | # send_request_if_needed, | ||
2796 | 509 | # is_request_complete, | ||
2797 | 510 | # CephBrokerRq, | ||
2798 | 511 | # ) | ||
2799 | 512 | # | ||
2800 | 513 | # @hooks.hook('ceph-relation-changed') | ||
2801 | 514 | # def ceph_changed(): | ||
2802 | 515 | # rq = CephBrokerRq() | ||
2803 | 516 | # rq.add_op_create_pool(name='poolname', replica_count=3) | ||
2804 | 517 | # | ||
2805 | 518 | # if is_request_complete(rq): | ||
2806 | 519 | # <Request complete actions> | ||
2807 | 520 | # else: | ||
2808 | 521 | # send_request_if_needed(get_ceph_request()) | ||
2809 | 522 | # | ||
2810 | 523 | # CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example | ||
2811 | 524 | # of glance having sent a request to ceph which ceph has successfully processed | ||
2812 | 525 | # 'ceph:8': { | ||
2813 | 526 | # 'ceph/0': { | ||
2814 | 527 | # 'auth': 'cephx', | ||
2815 | 528 | # 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', | ||
2816 | 529 | # 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', | ||
2817 | 530 | # 'ceph-public-address': '10.5.44.103', | ||
2818 | 531 | # 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', | ||
2819 | 532 | # 'private-address': '10.5.44.103', | ||
2820 | 533 | # }, | ||
2821 | 534 | # 'glance/0': { | ||
2822 | 535 | # 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' | ||
2823 | 536 | # '"ops": [{"replicas": 3, "name": "glance", ' | ||
2824 | 537 | # '"op": "create-pool"}]}'), | ||
2825 | 538 | # 'private-address': '10.5.44.109', | ||
2826 | 539 | # }, | ||
2827 | 540 | # } | ||
2828 | 541 | |||
2829 | 542 | def get_previous_request(rid): | ||
2830 | 543 | """Return the last ceph broker request sent on a given relation | ||
2831 | 544 | |||
2832 | 545 | @param rid: Relation id to query for request | ||
2833 | 546 | """ | ||
2834 | 547 | request = None | ||
2835 | 548 | broker_req = relation_get(attribute='broker_req', rid=rid, | ||
2836 | 549 | unit=local_unit()) | ||
2837 | 550 | if broker_req: | ||
2838 | 551 | request_data = json.loads(broker_req) | ||
2839 | 552 | request = CephBrokerRq(api_version=request_data['api-version'], | ||
2840 | 553 | request_id=request_data['request-id']) | ||
2841 | 554 | request.set_ops(request_data['ops']) | ||
2842 | 555 | |||
2843 | 556 | return request | ||
2844 | 557 | |||
2845 | 558 | |||
2846 | 559 | def get_request_states(request, relation='ceph'): | ||
2847 | 560 | """Return a dict of requests per relation id with their corresponding | ||
2848 | 561 | completion state. | ||
2849 | 562 | |||
2850 | 563 | This allows a charm, which has a request for ceph, to see whether there is | ||
2851 | 564 | an equivalent request already being processed and if so what state that | ||
2852 | 565 | request is in. | ||
2853 | 566 | |||
2854 | 567 | @param request: A CephBrokerRq object | ||
2855 | 568 | """ | ||
2856 | 569 | complete = [] | ||
2857 | 570 | requests = {} | ||
2858 | 571 | for rid in relation_ids(relation): | ||
2859 | 572 | complete = False | ||
2860 | 573 | previous_request = get_previous_request(rid) | ||
2861 | 574 | if request == previous_request: | ||
2862 | 575 | sent = True | ||
2863 | 576 | complete = is_request_complete_for_rid(previous_request, rid) | ||
2864 | 577 | else: | ||
2865 | 578 | sent = False | ||
2866 | 579 | complete = False | ||
2867 | 580 | |||
2868 | 581 | requests[rid] = { | ||
2869 | 582 | 'sent': sent, | ||
2870 | 583 | 'complete': complete, | ||
2871 | 584 | } | ||
2872 | 585 | |||
2873 | 586 | return requests | ||
2874 | 587 | |||
2875 | 588 | |||
2876 | 589 | def is_request_sent(request, relation='ceph'): | ||
2877 | 590 | """Check to see if a functionally equivalent request has already been sent | ||
2878 | 591 | |||
2879 | 592 | Returns True if a similar request has been sent | ||
2880 | 593 | |||
2881 | 594 | @param request: A CephBrokerRq object | ||
2882 | 595 | """ | ||
2883 | 596 | states = get_request_states(request, relation=relation) | ||
2884 | 597 | for rid in states.keys(): | ||
2885 | 598 | if not states[rid]['sent']: | ||
2886 | 599 | return False | ||
2887 | 600 | |||
2888 | 601 | return True | ||
2889 | 602 | |||
2890 | 603 | |||
2891 | 604 | def is_request_complete(request, relation='ceph'): | ||
2892 | 605 | """Check to see if a functionally equivalent request has already been | ||
2893 | 606 | completed | ||
2894 | 607 | |||
2895 | 608 | Returns True if a similar request has been completed | ||
2896 | 609 | |||
2897 | 610 | @param request: A CephBrokerRq object | ||
2898 | 611 | """ | ||
2899 | 612 | states = get_request_states(request, relation=relation) | ||
2900 | 613 | for rid in states.keys(): | ||
2901 | 614 | if not states[rid]['complete']: | ||
2902 | 615 | return False | ||
2903 | 616 | |||
2904 | 617 | return True | ||
2905 | 618 | |||
2906 | 619 | |||
2907 | 620 | def is_request_complete_for_rid(request, rid): | ||
2908 | 621 | """Check if a given request has been completed on the given relation | ||
2909 | 622 | |||
2910 | 623 | @param request: A CephBrokerRq object | ||
2911 | 624 | @param rid: Relation ID | ||
2912 | 625 | """ | ||
2913 | 626 | broker_key = get_broker_rsp_key() | ||
2914 | 627 | for unit in related_units(rid): | ||
2915 | 628 | rdata = relation_get(rid=rid, unit=unit) | ||
2916 | 629 | if rdata.get(broker_key): | ||
2917 | 630 | rsp = CephBrokerRsp(rdata.get(broker_key)) | ||
2918 | 631 | if rsp.request_id == request.request_id: | ||
2919 | 632 | if not rsp.exit_code: | ||
2920 | 633 | return True | ||
2921 | 634 | else: | ||
2922 | 635 | # The remote unit sent no reply targeted at this unit so either the | ||
2923 | 636 | # remote ceph cluster does not support unit targeted replies or it | ||
2924 | 637 | # has not processed our request yet. | ||
2925 | 638 | if rdata.get('broker_rsp'): | ||
2926 | 639 | request_data = json.loads(rdata['broker_rsp']) | ||
2927 | 640 | if request_data.get('request-id'): | ||
2928 | 641 | log('Ignoring legacy broker_rsp without unit key as remote ' | ||
2929 | 642 | 'service supports unit specific replies', level=DEBUG) | ||
2930 | 643 | else: | ||
2931 | 644 | log('Using legacy broker_rsp as remote service does not ' | ||
2932 | 645 | 'supports unit specific replies', level=DEBUG) | ||
2933 | 646 | rsp = CephBrokerRsp(rdata['broker_rsp']) | ||
2934 | 647 | if not rsp.exit_code: | ||
2935 | 648 | return True | ||
2936 | 649 | |||
2937 | 650 | return False | ||
2938 | 651 | |||
2939 | 652 | |||
2940 | 653 | def get_broker_rsp_key(): | ||
2941 | 654 | """Return broker response key for this unit | ||
2942 | 655 | |||
2943 | 656 | This is the key that ceph is going to use to pass request status | ||
2944 | 657 | information back to this unit | ||
2945 | 658 | """ | ||
2946 | 659 | return 'broker-rsp-' + local_unit().replace('/', '-') | ||
2947 | 660 | |||
2948 | 661 | |||
2949 | 662 | def send_request_if_needed(request, relation='ceph'): | ||
2950 | 663 | """Send broker request if an equivalent request has not already been sent | ||
2951 | 664 | |||
2952 | 665 | @param request: A CephBrokerRq object | ||
2953 | 666 | """ | ||
2954 | 667 | if is_request_sent(request, relation=relation): | ||
2955 | 668 | log('Request already sent but not complete, not sending new request', | ||
2956 | 669 | level=DEBUG) | ||
2957 | 670 | else: | ||
2958 | 671 | for rid in relation_ids(relation): | ||
2959 | 672 | log('Sending request {}'.format(request.request_id), level=DEBUG) | ||
2960 | 673 | relation_set(relation_id=rid, broker_req=request.request) | ||
2961 | 445 | 674 | ||
2962 | === modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
2963 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-06-10 15:45:48 +0000 | |||
2964 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-12-01 15:05:49 +0000 | |||
2965 | @@ -76,3 +76,13 @@ | |||
2966 | 76 | check_call(cmd) | 76 | check_call(cmd) |
2967 | 77 | 77 | ||
2968 | 78 | return create_loopback(path) | 78 | return create_loopback(path) |
2969 | 79 | |||
2970 | 80 | |||
2971 | 81 | def is_mapped_loopback_device(device): | ||
2972 | 82 | """ | ||
2973 | 83 | Checks if a given device name is an existing/mapped loopback device. | ||
2974 | 84 | :param device: str: Full path to the device (eg, /dev/loop1). | ||
2975 | 85 | :returns: str: Path to the backing file if is a loopback device | ||
2976 | 86 | empty string otherwise | ||
2977 | 87 | """ | ||
2978 | 88 | return loopback_devices().get(device, "") | ||
2979 | 79 | 89 | ||
2980 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
2981 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-10 15:45:48 +0000 | |||
2982 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-12-01 15:05:49 +0000 | |||
2983 | @@ -43,9 +43,10 @@ | |||
2984 | 43 | 43 | ||
2985 | 44 | :param block_device: str: Full path of block device to clean. | 44 | :param block_device: str: Full path of block device to clean. |
2986 | 45 | ''' | 45 | ''' |
2987 | 46 | # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b | ||
2988 | 46 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up | 47 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
2991 | 47 | call(['sgdisk', '--zap-all', '--mbrtogpt', | 48 | call(['sgdisk', '--zap-all', '--', block_device]) |
2992 | 48 | '--clear', block_device]) | 49 | call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) |
2993 | 49 | dev_end = check_output(['blockdev', '--getsz', | 50 | dev_end = check_output(['blockdev', '--getsz', |
2994 | 50 | block_device]).decode('UTF-8') | 51 | block_device]).decode('UTF-8') |
2995 | 51 | gpt_end = int(dev_end.split()[0]) - 100 | 52 | gpt_end = int(dev_end.split()[0]) - 100 |
2996 | @@ -67,4 +68,4 @@ | |||
2997 | 67 | out = check_output(['mount']).decode('UTF-8') | 68 | out = check_output(['mount']).decode('UTF-8') |
2998 | 68 | if is_partition: | 69 | if is_partition: |
2999 | 69 | return bool(re.search(device + r"\b", out)) | 70 | return bool(re.search(device + r"\b", out)) |
3001 | 70 | return bool(re.search(device + r"[0-9]+\b", out)) | 71 | return bool(re.search(device + r"[0-9]*\b", out)) |
3002 | 71 | 72 | ||
3003 | === added file 'hooks/charmhelpers/core/files.py' | |||
3004 | --- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000 | |||
3005 | +++ hooks/charmhelpers/core/files.py 2015-12-01 15:05:49 +0000 | |||
3006 | @@ -0,0 +1,45 @@ | |||
3007 | 1 | #!/usr/bin/env python | ||
3008 | 2 | # -*- coding: utf-8 -*- | ||
3009 | 3 | |||
3010 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
3011 | 5 | # | ||
3012 | 6 | # This file is part of charm-helpers. | ||
3013 | 7 | # | ||
3014 | 8 | # charm-helpers is free software: you can redistribute it and/or modify | ||
3015 | 9 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
3016 | 10 | # published by the Free Software Foundation. | ||
3017 | 11 | # | ||
3018 | 12 | # charm-helpers is distributed in the hope that it will be useful, | ||
3019 | 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
3020 | 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
3021 | 15 | # GNU Lesser General Public License for more details. | ||
3022 | 16 | # | ||
3023 | 17 | # You should have received a copy of the GNU Lesser General Public License | ||
3024 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
3025 | 19 | |||
3026 | 20 | __author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' | ||
3027 | 21 | |||
3028 | 22 | import os | ||
3029 | 23 | import subprocess | ||
3030 | 24 | |||
3031 | 25 | |||
3032 | 26 | def sed(filename, before, after, flags='g'): | ||
3033 | 27 | """ | ||
3034 | 28 | Search and replaces the given pattern on filename. | ||
3035 | 29 | |||
3036 | 30 | :param filename: relative or absolute file path. | ||
3037 | 31 | :param before: expression to be replaced (see 'man sed') | ||
3038 | 32 | :param after: expression to replace with (see 'man sed') | ||
3039 | 33 | :param flags: sed-compatible regex flags in example, to make | ||
3040 | 34 | the search and replace case insensitive, specify ``flags="i"``. | ||
3041 | 35 | The ``g`` flag is always specified regardless, so you do not | ||
3042 | 36 | need to remember to include it when overriding this parameter. | ||
3043 | 37 | :returns: If the sed command exit code was zero then return, | ||
3044 | 38 | otherwise raise CalledProcessError. | ||
3045 | 39 | """ | ||
3046 | 40 | expression = r's/{0}/{1}/{2}'.format(before, | ||
3047 | 41 | after, flags) | ||
3048 | 42 | |||
3049 | 43 | return subprocess.check_call(["sed", "-i", "-r", "-e", | ||
3050 | 44 | expression, | ||
3051 | 45 | os.path.expanduser(filename)]) | ||
3052 | 0 | 46 | ||
3053 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
3054 | --- hooks/charmhelpers/core/hookenv.py 2015-06-10 07:35:12 +0000 | |||
3055 | +++ hooks/charmhelpers/core/hookenv.py 2015-12-01 15:05:49 +0000 | |||
3056 | @@ -21,7 +21,10 @@ | |||
3057 | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
3058 | 22 | 22 | ||
3059 | 23 | from __future__ import print_function | 23 | from __future__ import print_function |
3060 | 24 | import copy | ||
3061 | 25 | from distutils.version import LooseVersion | ||
3062 | 24 | from functools import wraps | 26 | from functools import wraps |
3063 | 27 | import glob | ||
3064 | 25 | import os | 28 | import os |
3065 | 26 | import json | 29 | import json |
3066 | 27 | import yaml | 30 | import yaml |
3067 | @@ -71,6 +74,7 @@ | |||
3068 | 71 | res = func(*args, **kwargs) | 74 | res = func(*args, **kwargs) |
3069 | 72 | cache[key] = res | 75 | cache[key] = res |
3070 | 73 | return res | 76 | return res |
3071 | 77 | wrapper._wrapped = func | ||
3072 | 74 | return wrapper | 78 | return wrapper |
3073 | 75 | 79 | ||
3074 | 76 | 80 | ||
3075 | @@ -170,9 +174,19 @@ | |||
3076 | 170 | return os.environ.get('JUJU_RELATION', None) | 174 | return os.environ.get('JUJU_RELATION', None) |
3077 | 171 | 175 | ||
3078 | 172 | 176 | ||
3082 | 173 | def relation_id(): | 177 | @cached |
3083 | 174 | """The relation ID for the current relation hook""" | 178 | def relation_id(relation_name=None, service_or_unit=None): |
3084 | 175 | return os.environ.get('JUJU_RELATION_ID', None) | 179 | """The relation ID for the current or a specified relation""" |
3085 | 180 | if not relation_name and not service_or_unit: | ||
3086 | 181 | return os.environ.get('JUJU_RELATION_ID', None) | ||
3087 | 182 | elif relation_name and service_or_unit: | ||
3088 | 183 | service_name = service_or_unit.split('/')[0] | ||
3089 | 184 | for relid in relation_ids(relation_name): | ||
3090 | 185 | remote_service = remote_service_name(relid) | ||
3091 | 186 | if remote_service == service_name: | ||
3092 | 187 | return relid | ||
3093 | 188 | else: | ||
3094 | 189 | raise ValueError('Must specify neither or both of relation_name and service_or_unit') | ||
3095 | 176 | 190 | ||
3096 | 177 | 191 | ||
3097 | 178 | def local_unit(): | 192 | def local_unit(): |
3098 | @@ -190,9 +204,20 @@ | |||
3099 | 190 | return local_unit().split('/')[0] | 204 | return local_unit().split('/')[0] |
3100 | 191 | 205 | ||
3101 | 192 | 206 | ||
3102 | 207 | @cached | ||
3103 | 208 | def remote_service_name(relid=None): | ||
3104 | 209 | """The remote service name for a given relation-id (or the current relation)""" | ||
3105 | 210 | if relid is None: | ||
3106 | 211 | unit = remote_unit() | ||
3107 | 212 | else: | ||
3108 | 213 | units = related_units(relid) | ||
3109 | 214 | unit = units[0] if units else None | ||
3110 | 215 | return unit.split('/')[0] if unit else None | ||
3111 | 216 | |||
3112 | 217 | |||
3113 | 193 | def hook_name(): | 218 | def hook_name(): |
3114 | 194 | """The name of the currently executing hook""" | 219 | """The name of the currently executing hook""" |
3116 | 195 | return os.path.basename(sys.argv[0]) | 220 | return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
3117 | 196 | 221 | ||
3118 | 197 | 222 | ||
3119 | 198 | class Config(dict): | 223 | class Config(dict): |
3120 | @@ -242,29 +267,7 @@ | |||
3121 | 242 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | 267 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
3122 | 243 | if os.path.exists(self.path): | 268 | if os.path.exists(self.path): |
3123 | 244 | self.load_previous() | 269 | self.load_previous() |
3147 | 245 | 270 | atexit(self._implicit_save) | |
3125 | 246 | def __getitem__(self, key): | ||
3126 | 247 | """For regular dict lookups, check the current juju config first, | ||
3127 | 248 | then the previous (saved) copy. This ensures that user-saved values | ||
3128 | 249 | will be returned by a dict lookup. | ||
3129 | 250 | |||
3130 | 251 | """ | ||
3131 | 252 | try: | ||
3132 | 253 | return dict.__getitem__(self, key) | ||
3133 | 254 | except KeyError: | ||
3134 | 255 | return (self._prev_dict or {})[key] | ||
3135 | 256 | |||
3136 | 257 | def get(self, key, default=None): | ||
3137 | 258 | try: | ||
3138 | 259 | return self[key] | ||
3139 | 260 | except KeyError: | ||
3140 | 261 | return default | ||
3141 | 262 | |||
3142 | 263 | def keys(self): | ||
3143 | 264 | prev_keys = [] | ||
3144 | 265 | if self._prev_dict is not None: | ||
3145 | 266 | prev_keys = self._prev_dict.keys() | ||
3146 | 267 | return list(set(prev_keys + list(dict.keys(self)))) | ||
3148 | 268 | 271 | ||
3149 | 269 | def load_previous(self, path=None): | 272 | def load_previous(self, path=None): |
3150 | 270 | """Load previous copy of config from disk. | 273 | """Load previous copy of config from disk. |
3151 | @@ -283,6 +286,9 @@ | |||
3152 | 283 | self.path = path or self.path | 286 | self.path = path or self.path |
3153 | 284 | with open(self.path) as f: | 287 | with open(self.path) as f: |
3154 | 285 | self._prev_dict = json.load(f) | 288 | self._prev_dict = json.load(f) |
3155 | 289 | for k, v in copy.deepcopy(self._prev_dict).items(): | ||
3156 | 290 | if k not in self: | ||
3157 | 291 | self[k] = v | ||
3158 | 286 | 292 | ||
3159 | 287 | def changed(self, key): | 293 | def changed(self, key): |
3160 | 288 | """Return True if the current value for this key is different from | 294 | """Return True if the current value for this key is different from |
3161 | @@ -314,13 +320,13 @@ | |||
3162 | 314 | instance. | 320 | instance. |
3163 | 315 | 321 | ||
3164 | 316 | """ | 322 | """ |
3165 | 317 | if self._prev_dict: | ||
3166 | 318 | for k, v in six.iteritems(self._prev_dict): | ||
3167 | 319 | if k not in self: | ||
3168 | 320 | self[k] = v | ||
3169 | 321 | with open(self.path, 'w') as f: | 323 | with open(self.path, 'w') as f: |
3170 | 322 | json.dump(self, f) | 324 | json.dump(self, f) |
3171 | 323 | 325 | ||
3172 | 326 | def _implicit_save(self): | ||
3173 | 327 | if self.implicit_save: | ||
3174 | 328 | self.save() | ||
3175 | 329 | |||
3176 | 324 | 330 | ||
3177 | 325 | @cached | 331 | @cached |
3178 | 326 | def config(scope=None): | 332 | def config(scope=None): |
3179 | @@ -485,6 +491,76 @@ | |||
3180 | 485 | 491 | ||
3181 | 486 | 492 | ||
3182 | 487 | @cached | 493 | @cached |
3183 | 494 | def peer_relation_id(): | ||
3184 | 495 | '''Get a peer relation id if a peer relation has been joined, else None.''' | ||
3185 | 496 | md = metadata() | ||
3186 | 497 | section = md.get('peers') | ||
3187 | 498 | if section: | ||
3188 | 499 | for key in section: | ||
3189 | 500 | relids = relation_ids(key) | ||
3190 | 501 | if relids: | ||
3191 | 502 | return relids[0] | ||
3192 | 503 | return None | ||
3193 | 504 | |||
3194 | 505 | |||
3195 | 506 | @cached | ||
3196 | 507 | def relation_to_interface(relation_name): | ||
3197 | 508 | """ | ||
3198 | 509 | Given the name of a relation, return the interface that relation uses. | ||
3199 | 510 | |||
3200 | 511 | :returns: The interface name, or ``None``. | ||
3201 | 512 | """ | ||
3202 | 513 | return relation_to_role_and_interface(relation_name)[1] | ||
3203 | 514 | |||
3204 | 515 | |||
3205 | 516 | @cached | ||
3206 | 517 | def relation_to_role_and_interface(relation_name): | ||
3207 | 518 | """ | ||
3208 | 519 | Given the name of a relation, return the role and the name of the interface | ||
3209 | 520 | that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). | ||
3210 | 521 | |||
3211 | 522 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | ||
3212 | 523 | """ | ||
3213 | 524 | _metadata = metadata() | ||
3214 | 525 | for role in ('provides', 'requires', 'peer'): | ||
3215 | 526 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') | ||
3216 | 527 | if interface: | ||
3217 | 528 | return role, interface | ||
3218 | 529 | return None, None | ||
3219 | 530 | |||
3220 | 531 | |||
3221 | 532 | @cached | ||
3222 | 533 | def role_and_interface_to_relations(role, interface_name): | ||
3223 | 534 | """ | ||
3224 | 535 | Given a role and interface name, return a list of relation names for the | ||
3225 | 536 | current charm that use that interface under that role (where role is one | ||
3226 | 537 | of ``provides``, ``requires``, or ``peer``). | ||
3227 | 538 | |||
3228 | 539 | :returns: A list of relation names. | ||
3229 | 540 | """ | ||
3230 | 541 | _metadata = metadata() | ||
3231 | 542 | results = [] | ||
3232 | 543 | for relation_name, relation in _metadata.get(role, {}).items(): | ||
3233 | 544 | if relation['interface'] == interface_name: | ||
3234 | 545 | results.append(relation_name) | ||
3235 | 546 | return results | ||
3236 | 547 | |||
3237 | 548 | |||
3238 | 549 | @cached | ||
3239 | 550 | def interface_to_relations(interface_name): | ||
3240 | 551 | """ | ||
3241 | 552 | Given an interface, return a list of relation names for the current | ||
3242 | 553 | charm that use that interface. | ||
3243 | 554 | |||
3244 | 555 | :returns: A list of relation names. | ||
3245 | 556 | """ | ||
3246 | 557 | results = [] | ||
3247 | 558 | for role in ('provides', 'requires', 'peer'): | ||
3248 | 559 | results.extend(role_and_interface_to_relations(role, interface_name)) | ||
3249 | 560 | return results | ||
3250 | 561 | |||
3251 | 562 | |||
3252 | 563 | @cached | ||
3253 | 488 | def charm_name(): | 564 | def charm_name(): |
3254 | 489 | """Get the name of the current charm as is specified on metadata.yaml""" | 565 | """Get the name of the current charm as is specified on metadata.yaml""" |
3255 | 490 | return metadata().get('name') | 566 | return metadata().get('name') |
3256 | @@ -560,6 +636,38 @@ | |||
3257 | 560 | return unit_get('private-address') | 636 | return unit_get('private-address') |
3258 | 561 | 637 | ||
3259 | 562 | 638 | ||
3260 | 639 | @cached | ||
3261 | 640 | def storage_get(attribute="", storage_id=""): | ||
3262 | 641 | """Get storage attributes""" | ||
3263 | 642 | _args = ['storage-get', '--format=json'] | ||
3264 | 643 | if storage_id: | ||
3265 | 644 | _args.extend(('-s', storage_id)) | ||
3266 | 645 | if attribute: | ||
3267 | 646 | _args.append(attribute) | ||
3268 | 647 | try: | ||
3269 | 648 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
3270 | 649 | except ValueError: | ||
3271 | 650 | return None | ||
3272 | 651 | |||
3273 | 652 | |||
3274 | 653 | @cached | ||
3275 | 654 | def storage_list(storage_name=""): | ||
3276 | 655 | """List the storage IDs for the unit""" | ||
3277 | 656 | _args = ['storage-list', '--format=json'] | ||
3278 | 657 | if storage_name: | ||
3279 | 658 | _args.append(storage_name) | ||
3280 | 659 | try: | ||
3281 | 660 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
3282 | 661 | except ValueError: | ||
3283 | 662 | return None | ||
3284 | 663 | except OSError as e: | ||
3285 | 664 | import errno | ||
3286 | 665 | if e.errno == errno.ENOENT: | ||
3287 | 666 | # storage-list does not exist | ||
3288 | 667 | return [] | ||
3289 | 668 | raise | ||
3290 | 669 | |||
3291 | 670 | |||
3292 | 563 | class UnregisteredHookError(Exception): | 671 | class UnregisteredHookError(Exception): |
3293 | 564 | """Raised when an undefined hook is called""" | 672 | """Raised when an undefined hook is called""" |
3294 | 565 | pass | 673 | pass |
3295 | @@ -587,10 +695,14 @@ | |||
3296 | 587 | hooks.execute(sys.argv) | 695 | hooks.execute(sys.argv) |
3297 | 588 | """ | 696 | """ |
3298 | 589 | 697 | ||
3300 | 590 | def __init__(self, config_save=True): | 698 | def __init__(self, config_save=None): |
3301 | 591 | super(Hooks, self).__init__() | 699 | super(Hooks, self).__init__() |
3302 | 592 | self._hooks = {} | 700 | self._hooks = {} |
3304 | 593 | self._config_save = config_save | 701 | |
3305 | 702 | # For unknown reasons, we allow the Hooks constructor to override | ||
3306 | 703 | # config().implicit_save. | ||
3307 | 704 | if config_save is not None: | ||
3308 | 705 | config().implicit_save = config_save | ||
3309 | 594 | 706 | ||
3310 | 595 | def register(self, name, function): | 707 | def register(self, name, function): |
3311 | 596 | """Register a hook""" | 708 | """Register a hook""" |
3312 | @@ -598,13 +710,16 @@ | |||
3313 | 598 | 710 | ||
3314 | 599 | def execute(self, args): | 711 | def execute(self, args): |
3315 | 600 | """Execute a registered hook based on args[0]""" | 712 | """Execute a registered hook based on args[0]""" |
3316 | 713 | _run_atstart() | ||
3317 | 601 | hook_name = os.path.basename(args[0]) | 714 | hook_name = os.path.basename(args[0]) |
3318 | 602 | if hook_name in self._hooks: | 715 | if hook_name in self._hooks: |
3324 | 603 | self._hooks[hook_name]() | 716 | try: |
3325 | 604 | if self._config_save: | 717 | self._hooks[hook_name]() |
3326 | 605 | cfg = config() | 718 | except SystemExit as x: |
3327 | 606 | if cfg.implicit_save: | 719 | if x.code is None or x.code == 0: |
3328 | 607 | cfg.save() | 720 | _run_atexit() |
3329 | 721 | raise | ||
3330 | 722 | _run_atexit() | ||
3331 | 608 | else: | 723 | else: |
3332 | 609 | raise UnregisteredHookError(hook_name) | 724 | raise UnregisteredHookError(hook_name) |
3333 | 610 | 725 | ||
3334 | @@ -653,6 +768,21 @@ | |||
3335 | 653 | subprocess.check_call(['action-fail', message]) | 768 | subprocess.check_call(['action-fail', message]) |
3336 | 654 | 769 | ||
3337 | 655 | 770 | ||
3338 | 771 | def action_name(): | ||
3339 | 772 | """Get the name of the currently executing action.""" | ||
3340 | 773 | return os.environ.get('JUJU_ACTION_NAME') | ||
3341 | 774 | |||
3342 | 775 | |||
3343 | 776 | def action_uuid(): | ||
3344 | 777 | """Get the UUID of the currently executing action.""" | ||
3345 | 778 | return os.environ.get('JUJU_ACTION_UUID') | ||
3346 | 779 | |||
3347 | 780 | |||
3348 | 781 | def action_tag(): | ||
3349 | 782 | """Get the tag for the currently executing action.""" | ||
3350 | 783 | return os.environ.get('JUJU_ACTION_TAG') | ||
3351 | 784 | |||
3352 | 785 | |||
3353 | 656 | def status_set(workload_state, message): | 786 | def status_set(workload_state, message): |
3354 | 657 | """Set the workload state with a message | 787 | """Set the workload state with a message |
3355 | 658 | 788 | ||
3356 | @@ -682,25 +812,28 @@ | |||
3357 | 682 | 812 | ||
3358 | 683 | 813 | ||
3359 | 684 | def status_get(): | 814 | def status_get(): |
3364 | 685 | """Retrieve the previously set juju workload state | 815 | """Retrieve the previously set juju workload state and message |
3365 | 686 | 816 | ||
3366 | 687 | If the status-set command is not found then assume this is juju < 1.23 and | 817 | If the status-get command is not found then assume this is juju < 1.23 and |
3367 | 688 | return 'unknown' | 818 | return 'unknown', "" |
3368 | 819 | |||
3369 | 689 | """ | 820 | """ |
3371 | 690 | cmd = ['status-get'] | 821 | cmd = ['status-get', "--format=json", "--include-data"] |
3372 | 691 | try: | 822 | try: |
3376 | 692 | raw_status = subprocess.check_output(cmd, universal_newlines=True) | 823 | raw_status = subprocess.check_output(cmd) |
3374 | 693 | status = raw_status.rstrip() | ||
3375 | 694 | return status | ||
3377 | 695 | except OSError as e: | 824 | except OSError as e: |
3378 | 696 | if e.errno == errno.ENOENT: | 825 | if e.errno == errno.ENOENT: |
3380 | 697 | return 'unknown' | 826 | return ('unknown', "") |
3381 | 698 | else: | 827 | else: |
3382 | 699 | raise | 828 | raise |
3383 | 829 | else: | ||
3384 | 830 | status = json.loads(raw_status.decode("UTF-8")) | ||
3385 | 831 | return (status["status"], status["message"]) | ||
3386 | 700 | 832 | ||
3387 | 701 | 833 | ||
3388 | 702 | def translate_exc(from_exc, to_exc): | 834 | def translate_exc(from_exc, to_exc): |
3389 | 703 | def inner_translate_exc1(f): | 835 | def inner_translate_exc1(f): |
3390 | 836 | @wraps(f) | ||
3391 | 704 | def inner_translate_exc2(*args, **kwargs): | 837 | def inner_translate_exc2(*args, **kwargs): |
3392 | 705 | try: | 838 | try: |
3393 | 706 | return f(*args, **kwargs) | 839 | return f(*args, **kwargs) |
3394 | @@ -732,13 +865,80 @@ | |||
3395 | 732 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | 865 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3396 | 733 | def leader_set(settings=None, **kwargs): | 866 | def leader_set(settings=None, **kwargs): |
3397 | 734 | """Juju leader set value(s)""" | 867 | """Juju leader set value(s)""" |
3399 | 735 | log("Juju leader-set '%s'" % (settings), level=DEBUG) | 868 | # Don't log secrets. |
3400 | 869 | # log("Juju leader-set '%s'" % (settings), level=DEBUG) | ||
3401 | 736 | cmd = ['leader-set'] | 870 | cmd = ['leader-set'] |
3402 | 737 | settings = settings or {} | 871 | settings = settings or {} |
3403 | 738 | settings.update(kwargs) | 872 | settings.update(kwargs) |
3405 | 739 | for k, v in settings.iteritems(): | 873 | for k, v in settings.items(): |
3406 | 740 | if v is None: | 874 | if v is None: |
3407 | 741 | cmd.append('{}='.format(k)) | 875 | cmd.append('{}='.format(k)) |
3408 | 742 | else: | 876 | else: |
3409 | 743 | cmd.append('{}={}'.format(k, v)) | 877 | cmd.append('{}={}'.format(k, v)) |
3410 | 744 | subprocess.check_call(cmd) | 878 | subprocess.check_call(cmd) |
3411 | 879 | |||
3412 | 880 | |||
3413 | 881 | @cached | ||
3414 | 882 | def juju_version(): | ||
3415 | 883 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" | ||
3416 | 884 | # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 | ||
3417 | 885 | jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] | ||
3418 | 886 | return subprocess.check_output([jujud, 'version'], | ||
3419 | 887 | universal_newlines=True).strip() | ||
3420 | 888 | |||
3421 | 889 | |||
3422 | 890 | @cached | ||
3423 | 891 | def has_juju_version(minimum_version): | ||
3424 | 892 | """Return True if the Juju version is at least the provided version""" | ||
3425 | 893 | return LooseVersion(juju_version()) >= LooseVersion(minimum_version) | ||
3426 | 894 | |||
3427 | 895 | |||
3428 | 896 | _atexit = [] | ||
3429 | 897 | _atstart = [] | ||
3430 | 898 | |||
3431 | 899 | |||
3432 | 900 | def atstart(callback, *args, **kwargs): | ||
3433 | 901 | '''Schedule a callback to run before the main hook. | ||
3434 | 902 | |||
3435 | 903 | Callbacks are run in the order they were added. | ||
3436 | 904 | |||
3437 | 905 | This is useful for modules and classes to perform initialization | ||
3438 | 906 | and inject behavior. In particular: | ||
3439 | 907 | |||
3440 | 908 | - Run common code before all of your hooks, such as logging | ||
3441 | 909 | the hook name or interesting relation data. | ||
3442 | 910 | - Defer object or module initialization that requires a hook | ||
3443 | 911 | context until we know there actually is a hook context, | ||
3444 | 912 | making testing easier. | ||
3445 | 913 | - Rather than requiring charm authors to include boilerplate to | ||
3446 | 914 | invoke your helper's behavior, have it run automatically if | ||
3447 | 915 | your object is instantiated or module imported. | ||
3448 | 916 | |||
3449 | 917 | This is not at all useful after your hook framework as been launched. | ||
3450 | 918 | ''' | ||
3451 | 919 | global _atstart | ||
3452 | 920 | _atstart.append((callback, args, kwargs)) | ||
3453 | 921 | |||
3454 | 922 | |||
3455 | 923 | def atexit(callback, *args, **kwargs): | ||
3456 | 924 | '''Schedule a callback to run on successful hook completion. | ||
3457 | 925 | |||
3458 | 926 | Callbacks are run in the reverse order that they were added.''' | ||
3459 | 927 | _atexit.append((callback, args, kwargs)) | ||
3460 | 928 | |||
3461 | 929 | |||
3462 | 930 | def _run_atstart(): | ||
3463 | 931 | '''Hook frameworks must invoke this before running the main hook body.''' | ||
3464 | 932 | global _atstart | ||
3465 | 933 | for callback, args, kwargs in _atstart: | ||
3466 | 934 | callback(*args, **kwargs) | ||
3467 | 935 | del _atstart[:] | ||
3468 | 936 | |||
3469 | 937 | |||
3470 | 938 | def _run_atexit(): | ||
3471 | 939 | '''Hook frameworks must invoke this after the main hook body has | ||
3472 | 940 | successfully completed. Do not invoke it if the hook fails.''' | ||
3473 | 941 | global _atexit | ||
3474 | 942 | for callback, args, kwargs in reversed(_atexit): | ||
3475 | 943 | callback(*args, **kwargs) | ||
3476 | 944 | del _atexit[:] | ||
3477 | 745 | 945 | ||
3478 | === modified file 'hooks/charmhelpers/core/host.py' | |||
3479 | --- hooks/charmhelpers/core/host.py 2015-07-01 13:35:47 +0000 | |||
3480 | +++ hooks/charmhelpers/core/host.py 2015-12-01 15:05:49 +0000 | |||
3481 | @@ -63,6 +63,56 @@ | |||
3482 | 63 | return service_result | 63 | return service_result |
3483 | 64 | 64 | ||
3484 | 65 | 65 | ||
3485 | 66 | def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): | ||
3486 | 67 | """Pause a system service. | ||
3487 | 68 | |||
3488 | 69 | Stop it, and prevent it from starting again at boot.""" | ||
3489 | 70 | stopped = True | ||
3490 | 71 | if service_running(service_name): | ||
3491 | 72 | stopped = service_stop(service_name) | ||
3492 | 73 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | ||
3493 | 74 | sysv_file = os.path.join(initd_dir, service_name) | ||
3494 | 75 | if os.path.exists(upstart_file): | ||
3495 | 76 | override_path = os.path.join( | ||
3496 | 77 | init_dir, '{}.override'.format(service_name)) | ||
3497 | 78 | with open(override_path, 'w') as fh: | ||
3498 | 79 | fh.write("manual\n") | ||
3499 | 80 | elif os.path.exists(sysv_file): | ||
3500 | 81 | subprocess.check_call(["update-rc.d", service_name, "disable"]) | ||
3501 | 82 | else: | ||
3502 | 83 | # XXX: Support SystemD too | ||
3503 | 84 | raise ValueError( | ||
3504 | 85 | "Unable to detect {0} as either Upstart {1} or SysV {2}".format( | ||
3505 | 86 | service_name, upstart_file, sysv_file)) | ||
3506 | 87 | return stopped | ||
3507 | 88 | |||
3508 | 89 | |||
3509 | 90 | def service_resume(service_name, init_dir="/etc/init", | ||
3510 | 91 | initd_dir="/etc/init.d"): | ||
3511 | 92 | """Resume a system service. | ||
3512 | 93 | |||
3513 | 94 | Reenable starting again at boot. Start the service""" | ||
3514 | 95 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | ||
3515 | 96 | sysv_file = os.path.join(initd_dir, service_name) | ||
3516 | 97 | if os.path.exists(upstart_file): | ||
3517 | 98 | override_path = os.path.join( | ||
3518 | 99 | init_dir, '{}.override'.format(service_name)) | ||
3519 | 100 | if os.path.exists(override_path): | ||
3520 | 101 | os.unlink(override_path) | ||
3521 | 102 | elif os.path.exists(sysv_file): | ||
3522 | 103 | subprocess.check_call(["update-rc.d", service_name, "enable"]) | ||
3523 | 104 | else: | ||
3524 | 105 | # XXX: Support SystemD too | ||
3525 | 106 | raise ValueError( | ||
3526 | 107 | "Unable to detect {0} as either Upstart {1} or SysV {2}".format( | ||
3527 | 108 | service_name, upstart_file, sysv_file)) | ||
3528 | 109 | |||
3529 | 110 | started = service_running(service_name) | ||
3530 | 111 | if not started: | ||
3531 | 112 | started = service_start(service_name) | ||
3532 | 113 | return started | ||
3533 | 114 | |||
3534 | 115 | |||
3535 | 66 | def service(action, service_name): | 116 | def service(action, service_name): |
3536 | 67 | """Control a system service""" | 117 | """Control a system service""" |
3537 | 68 | cmd = ['service', service_name, action] | 118 | cmd = ['service', service_name, action] |
3538 | @@ -119,8 +169,9 @@ | |||
3539 | 119 | 169 | ||
3540 | 120 | 170 | ||
3541 | 121 | def user_exists(username): | 171 | def user_exists(username): |
3542 | 172 | """Check if a user exists""" | ||
3543 | 122 | try: | 173 | try: |
3545 | 123 | user_info = pwd.getpwnam(username) | 174 | pwd.getpwnam(username) |
3546 | 124 | user_exists = True | 175 | user_exists = True |
3547 | 125 | except KeyError: | 176 | except KeyError: |
3548 | 126 | user_exists = False | 177 | user_exists = False |
3549 | @@ -149,11 +200,7 @@ | |||
3550 | 149 | 200 | ||
3551 | 150 | def add_user_to_group(username, group): | 201 | def add_user_to_group(username, group): |
3552 | 151 | """Add a user to a group""" | 202 | """Add a user to a group""" |
3558 | 152 | cmd = [ | 203 | cmd = ['gpasswd', '-a', username, group] |
3554 | 153 | 'gpasswd', '-a', | ||
3555 | 154 | username, | ||
3556 | 155 | group | ||
3557 | 156 | ] | ||
3559 | 157 | log("Adding user {} to group {}".format(username, group)) | 204 | log("Adding user {} to group {}".format(username, group)) |
3560 | 158 | subprocess.check_call(cmd) | 205 | subprocess.check_call(cmd) |
3561 | 159 | 206 | ||
3562 | @@ -263,8 +310,8 @@ | |||
3563 | 263 | return system_mounts | 310 | return system_mounts |
3564 | 264 | 311 | ||
3565 | 265 | 312 | ||
3566 | 266 | |||
3567 | 267 | def fstab_mount(mountpoint): | 313 | def fstab_mount(mountpoint): |
3568 | 314 | """Mount filesystem using fstab""" | ||
3569 | 268 | cmd_args = ['mount', mountpoint] | 315 | cmd_args = ['mount', mountpoint] |
3570 | 269 | try: | 316 | try: |
3571 | 270 | subprocess.check_output(cmd_args) | 317 | subprocess.check_output(cmd_args) |
3572 | @@ -390,25 +437,80 @@ | |||
3573 | 390 | return(''.join(random_chars)) | 437 | return(''.join(random_chars)) |
3574 | 391 | 438 | ||
3575 | 392 | 439 | ||
3577 | 393 | def list_nics(nic_type): | 440 | def is_phy_iface(interface): |
3578 | 441 | """Returns True if interface is not virtual, otherwise False.""" | ||
3579 | 442 | if interface: | ||
3580 | 443 | sys_net = '/sys/class/net' | ||
3581 | 444 | if os.path.isdir(sys_net): | ||
3582 | 445 | for iface in glob.glob(os.path.join(sys_net, '*')): | ||
3583 | 446 | if '/virtual/' in os.path.realpath(iface): | ||
3584 | 447 | continue | ||
3585 | 448 | |||
3586 | 449 | if interface == os.path.basename(iface): | ||
3587 | 450 | return True | ||
3588 | 451 | |||
3589 | 452 | return False | ||
3590 | 453 | |||
3591 | 454 | |||
3592 | 455 | def get_bond_master(interface): | ||
3593 | 456 | """Returns bond master if interface is bond slave otherwise None. | ||
3594 | 457 | |||
3595 | 458 | NOTE: the provided interface is expected to be physical | ||
3596 | 459 | """ | ||
3597 | 460 | if interface: | ||
3598 | 461 | iface_path = '/sys/class/net/%s' % (interface) | ||
3599 | 462 | if os.path.exists(iface_path): | ||
3600 | 463 | if '/virtual/' in os.path.realpath(iface_path): | ||
3601 | 464 | return None | ||
3602 | 465 | |||
3603 | 466 | master = os.path.join(iface_path, 'master') | ||
3604 | 467 | if os.path.exists(master): | ||
3605 | 468 | master = os.path.realpath(master) | ||
3606 | 469 | # make sure it is a bond master | ||
3607 | 470 | if os.path.exists(os.path.join(master, 'bonding')): | ||
3608 | 471 | return os.path.basename(master) | ||
3609 | 472 | |||
3610 | 473 | return None | ||
3611 | 474 | |||
3612 | 475 | |||
3613 | 476 | def list_nics(nic_type=None): | ||
3614 | 394 | '''Return a list of nics of given type(s)''' | 477 | '''Return a list of nics of given type(s)''' |
3615 | 395 | if isinstance(nic_type, six.string_types): | 478 | if isinstance(nic_type, six.string_types): |
3616 | 396 | int_types = [nic_type] | 479 | int_types = [nic_type] |
3617 | 397 | else: | 480 | else: |
3618 | 398 | int_types = nic_type | 481 | int_types = nic_type |
3619 | 482 | |||
3620 | 399 | interfaces = [] | 483 | interfaces = [] |
3623 | 400 | for int_type in int_types: | 484 | if nic_type: |
3624 | 401 | cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] | 485 | for int_type in int_types: |
3625 | 486 | cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] | ||
3626 | 487 | ip_output = subprocess.check_output(cmd).decode('UTF-8') | ||
3627 | 488 | ip_output = ip_output.split('\n') | ||
3628 | 489 | ip_output = (line for line in ip_output if line) | ||
3629 | 490 | for line in ip_output: | ||
3630 | 491 | if line.split()[1].startswith(int_type): | ||
3631 | 492 | matched = re.search('.*: (' + int_type + | ||
3632 | 493 | r'[0-9]+\.[0-9]+)@.*', line) | ||
3633 | 494 | if matched: | ||
3634 | 495 | iface = matched.groups()[0] | ||
3635 | 496 | else: | ||
3636 | 497 | iface = line.split()[1].replace(":", "") | ||
3637 | 498 | |||
3638 | 499 | if iface not in interfaces: | ||
3639 | 500 | interfaces.append(iface) | ||
3640 | 501 | else: | ||
3641 | 502 | cmd = ['ip', 'a'] | ||
3642 | 402 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') | 503 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') |
3644 | 403 | ip_output = (line for line in ip_output if line) | 504 | ip_output = (line.strip() for line in ip_output if line) |
3645 | 505 | |||
3646 | 506 | key = re.compile('^[0-9]+:\s+(.+):') | ||
3647 | 404 | for line in ip_output: | 507 | for line in ip_output: |
3655 | 405 | if line.split()[1].startswith(int_type): | 508 | matched = re.search(key, line) |
3656 | 406 | matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) | 509 | if matched: |
3657 | 407 | if matched: | 510 | iface = matched.group(1) |
3658 | 408 | interface = matched.groups()[0] | 511 | iface = iface.partition("@")[0] |
3659 | 409 | else: | 512 | if iface not in interfaces: |
3660 | 410 | interface = line.split()[1].replace(":", "") | 513 | interfaces.append(iface) |
3654 | 411 | interfaces.append(interface) | ||
3661 | 412 | 514 | ||
3662 | 413 | return interfaces | 515 | return interfaces |
3663 | 414 | 516 | ||
3664 | @@ -440,23 +542,6 @@ | |||
3665 | 440 | return hwaddr | 542 | return hwaddr |
3666 | 441 | 543 | ||
3667 | 442 | 544 | ||
3668 | 443 | def get_mac_nic_map(): | ||
3669 | 444 | '''Return a dict of macs and their corresponding nics''' | ||
3670 | 445 | cmd = ['ip', '-o', '-0', 'addr', 'list'] | ||
3671 | 446 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') | ||
3672 | 447 | mac_nic_map = {} | ||
3673 | 448 | for line in ip_output: | ||
3674 | 449 | columns = line.split() | ||
3675 | 450 | if 'link/ether' in columns: | ||
3676 | 451 | hwaddr = columns[columns.index('link/ether') + 1] | ||
3677 | 452 | nic = columns[1].replace(':', '') | ||
3678 | 453 | if mac_nic_map.get(hwaddr): | ||
3679 | 454 | mac_nic_map[hwaddr].append(nic) | ||
3680 | 455 | else: | ||
3681 | 456 | mac_nic_map[hwaddr] = [nic] | ||
3682 | 457 | return mac_nic_map | ||
3683 | 458 | |||
3684 | 459 | |||
3685 | 460 | def cmp_pkgrevno(package, revno, pkgcache=None): | 545 | def cmp_pkgrevno(package, revno, pkgcache=None): |
3686 | 461 | '''Compare supplied revno with the revno of the installed package | 546 | '''Compare supplied revno with the revno of the installed package |
3687 | 462 | 547 | ||
3688 | @@ -485,7 +570,14 @@ | |||
3689 | 485 | os.chdir(cur) | 570 | os.chdir(cur) |
3690 | 486 | 571 | ||
3691 | 487 | 572 | ||
3693 | 488 | def chownr(path, owner, group, follow_links=True): | 573 | def chownr(path, owner, group, follow_links=True, chowntopdir=False): |
3694 | 574 | """ | ||
3695 | 575 | Recursively change user and group ownership of files and directories | ||
3696 | 576 | in given path. Doesn't chown path itself by default, only its children. | ||
3697 | 577 | |||
3698 | 578 | :param bool follow_links: Also Chown links if True | ||
3699 | 579 | :param bool chowntopdir: Also chown path itself if True | ||
3700 | 580 | """ | ||
3701 | 489 | uid = pwd.getpwnam(owner).pw_uid | 581 | uid = pwd.getpwnam(owner).pw_uid |
3702 | 490 | gid = grp.getgrnam(group).gr_gid | 582 | gid = grp.getgrnam(group).gr_gid |
3703 | 491 | if follow_links: | 583 | if follow_links: |
3704 | @@ -493,6 +585,10 @@ | |||
3705 | 493 | else: | 585 | else: |
3706 | 494 | chown = os.lchown | 586 | chown = os.lchown |
3707 | 495 | 587 | ||
3708 | 588 | if chowntopdir: | ||
3709 | 589 | broken_symlink = os.path.lexists(path) and not os.path.exists(path) | ||
3710 | 590 | if not broken_symlink: | ||
3711 | 591 | chown(path, uid, gid) | ||
3712 | 496 | for root, dirs, files in os.walk(path): | 592 | for root, dirs, files in os.walk(path): |
3713 | 497 | for name in dirs + files: | 593 | for name in dirs + files: |
3714 | 498 | full = os.path.join(root, name) | 594 | full = os.path.join(root, name) |
3715 | @@ -503,3 +599,19 @@ | |||
3716 | 503 | 599 | ||
3717 | 504 | def lchownr(path, owner, group): | 600 | def lchownr(path, owner, group): |
3718 | 505 | chownr(path, owner, group, follow_links=False) | 601 | chownr(path, owner, group, follow_links=False) |
3719 | 602 | |||
3720 | 603 | |||
3721 | 604 | def get_total_ram(): | ||
3722 | 605 | '''The total amount of system RAM in bytes. | ||
3723 | 606 | |||
3724 | 607 | This is what is reported by the OS, and may be overcommitted when | ||
3725 | 608 | there are multiple containers hosted on the same machine. | ||
3726 | 609 | ''' | ||
3727 | 610 | with open('/proc/meminfo', 'r') as f: | ||
3728 | 611 | for line in f.readlines(): | ||
3729 | 612 | if line: | ||
3730 | 613 | key, value, unit = line.split() | ||
3731 | 614 | if key == 'MemTotal:': | ||
3732 | 615 | assert unit == 'kB', 'Unknown unit' | ||
3733 | 616 | return int(value) * 1024 # Classic, not KiB. | ||
3734 | 617 | raise NotImplementedError() | ||
3735 | 506 | 618 | ||
3736 | === modified file 'hooks/charmhelpers/core/hugepage.py' | |||
3737 | --- hooks/charmhelpers/core/hugepage.py 2015-06-22 09:26:28 +0000 | |||
3738 | +++ hooks/charmhelpers/core/hugepage.py 2015-12-01 15:05:49 +0000 | |||
3739 | @@ -1,5 +1,3 @@ | |||
3740 | 1 | |||
3741 | 2 | #!/usr/bin/env python | ||
3742 | 3 | # -*- coding: utf-8 -*- | 1 | # -*- coding: utf-8 -*- |
3743 | 4 | 2 | ||
3744 | 5 | # Copyright 2014-2015 Canonical Limited. | 3 | # Copyright 2014-2015 Canonical Limited. |
3745 | @@ -19,36 +17,55 @@ | |||
3746 | 19 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 17 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3747 | 20 | 18 | ||
3748 | 21 | import yaml | 19 | import yaml |
3753 | 22 | from charmhelpers.core.fstab import Fstab | 20 | from charmhelpers.core import fstab |
3754 | 23 | from charmhelpers.core.sysctl import ( | 21 | from charmhelpers.core import sysctl |
3751 | 24 | create, | ||
3752 | 25 | ) | ||
3755 | 26 | from charmhelpers.core.host import ( | 22 | from charmhelpers.core.host import ( |
3756 | 27 | add_group, | 23 | add_group, |
3757 | 28 | add_user_to_group, | 24 | add_user_to_group, |
3758 | 29 | fstab_mount, | 25 | fstab_mount, |
3759 | 30 | mkdir, | 26 | mkdir, |
3760 | 31 | ) | 27 | ) |
3761 | 28 | from charmhelpers.core.strutils import bytes_from_string | ||
3762 | 29 | from subprocess import check_output | ||
3763 | 30 | |||
3764 | 32 | 31 | ||
3765 | 33 | def hugepage_support(user, group='hugetlb', nr_hugepages=256, | 32 | def hugepage_support(user, group='hugetlb', nr_hugepages=256, |
3768 | 34 | max_map_count=65536, mnt_point='/hugepages', | 33 | max_map_count=65536, mnt_point='/run/hugepages/kvm', |
3769 | 35 | pagesize='2MB', mount=True): | 34 | pagesize='2MB', mount=True, set_shmmax=False): |
3770 | 35 | """Enable hugepages on system. | ||
3771 | 36 | |||
3772 | 37 | Args: | ||
3773 | 38 | user (str) -- Username to allow access to hugepages to | ||
3774 | 39 | group (str) -- Group name to own hugepages | ||
3775 | 40 | nr_hugepages (int) -- Number of pages to reserve | ||
3776 | 41 | max_map_count (int) -- Number of Virtual Memory Areas a process can own | ||
3777 | 42 | mnt_point (str) -- Directory to mount hugepages on | ||
3778 | 43 | pagesize (str) -- Size of hugepages | ||
3779 | 44 | mount (bool) -- Whether to Mount hugepages | ||
3780 | 45 | """ | ||
3781 | 36 | group_info = add_group(group) | 46 | group_info = add_group(group) |
3782 | 37 | gid = group_info.gr_gid | 47 | gid = group_info.gr_gid |
3783 | 38 | add_user_to_group(user, group) | 48 | add_user_to_group(user, group) |
3784 | 49 | if max_map_count < 2 * nr_hugepages: | ||
3785 | 50 | max_map_count = 2 * nr_hugepages | ||
3786 | 39 | sysctl_settings = { | 51 | sysctl_settings = { |
3787 | 40 | 'vm.nr_hugepages': nr_hugepages, | 52 | 'vm.nr_hugepages': nr_hugepages, |
3789 | 41 | 'vm.max_map_count': max_map_count, # 1GB | 53 | 'vm.max_map_count': max_map_count, |
3790 | 42 | 'vm.hugetlb_shm_group': gid, | 54 | 'vm.hugetlb_shm_group': gid, |
3791 | 43 | } | 55 | } |
3793 | 44 | create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') | 56 | if set_shmmax: |
3794 | 57 | shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) | ||
3795 | 58 | shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages | ||
3796 | 59 | if shmmax_minsize > shmmax_current: | ||
3797 | 60 | sysctl_settings['kernel.shmmax'] = shmmax_minsize | ||
3798 | 61 | sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') | ||
3799 | 45 | mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) | 62 | mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) |
3802 | 46 | fstab = Fstab() | 63 | lfstab = fstab.Fstab() |
3803 | 47 | fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point) | 64 | fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) |
3804 | 48 | if fstab_entry: | 65 | if fstab_entry: |
3809 | 49 | fstab.remove_entry(fstab_entry) | 66 | lfstab.remove_entry(fstab_entry) |
3810 | 50 | entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs', | 67 | entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', |
3811 | 51 | 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) | 68 | 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
3812 | 52 | fstab.add_entry(entry) | 69 | lfstab.add_entry(entry) |
3813 | 53 | if mount: | 70 | if mount: |
3814 | 54 | fstab_mount(mnt_point) | 71 | fstab_mount(mnt_point) |
3815 | 55 | 72 | ||
3816 | === added file 'hooks/charmhelpers/core/kernel.py' | |||
3817 | --- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000 | |||
3818 | +++ hooks/charmhelpers/core/kernel.py 2015-12-01 15:05:49 +0000 | |||
3819 | @@ -0,0 +1,68 @@ | |||
3820 | 1 | #!/usr/bin/env python | ||
3821 | 2 | # -*- coding: utf-8 -*- | ||
3822 | 3 | |||
3823 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
3824 | 5 | # | ||
3825 | 6 | # This file is part of charm-helpers. | ||
3826 | 7 | # | ||
3827 | 8 | # charm-helpers is free software: you can redistribute it and/or modify | ||
3828 | 9 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
3829 | 10 | # published by the Free Software Foundation. | ||
3830 | 11 | # | ||
3831 | 12 | # charm-helpers is distributed in the hope that it will be useful, | ||
3832 | 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
3833 | 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
3834 | 15 | # GNU Lesser General Public License for more details. | ||
3835 | 16 | # | ||
3836 | 17 | # You should have received a copy of the GNU Lesser General Public License | ||
3837 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
3838 | 19 | |||
3839 | 20 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | ||
3840 | 21 | |||
3841 | 22 | from charmhelpers.core.hookenv import ( | ||
3842 | 23 | log, | ||
3843 | 24 | INFO | ||
3844 | 25 | ) | ||
3845 | 26 | |||
3846 | 27 | from subprocess import check_call, check_output | ||
3847 | 28 | import re | ||
3848 | 29 | |||
3849 | 30 | |||
3850 | 31 | def modprobe(module, persist=True): | ||
3851 | 32 | """Load a kernel module and configure for auto-load on reboot.""" | ||
3852 | 33 | cmd = ['modprobe', module] | ||
3853 | 34 | |||
3854 | 35 | log('Loading kernel module %s' % module, level=INFO) | ||
3855 | 36 | |||
3856 | 37 | check_call(cmd) | ||
3857 | 38 | if persist: | ||
3858 | 39 | with open('/etc/modules', 'r+') as modules: | ||
3859 | 40 | if module not in modules.read(): | ||
3860 | 41 | modules.write(module) | ||
3861 | 42 | |||
3862 | 43 | |||
3863 | 44 | def rmmod(module, force=False): | ||
3864 | 45 | """Remove a module from the linux kernel""" | ||
3865 | 46 | cmd = ['rmmod'] | ||
3866 | 47 | if force: | ||
3867 | 48 | cmd.append('-f') | ||
3868 | 49 | cmd.append(module) | ||
3869 | 50 | log('Removing kernel module %s' % module, level=INFO) | ||
3870 | 51 | return check_call(cmd) | ||
3871 | 52 | |||
3872 | 53 | |||
3873 | 54 | def lsmod(): | ||
3874 | 55 | """Shows what kernel modules are currently loaded""" | ||
3875 | 56 | return check_output(['lsmod'], | ||
3876 | 57 | universal_newlines=True) | ||
3877 | 58 | |||
3878 | 59 | |||
3879 | 60 | def is_module_loaded(module): | ||
3880 | 61 | """Checks if a kernel module is already loaded""" | ||
3881 | 62 | matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) | ||
3882 | 63 | return len(matches) > 0 | ||
3883 | 64 | |||
3884 | 65 | |||
3885 | 66 | def update_initramfs(version='all'): | ||
3886 | 67 | """Updates an initramfs image""" | ||
3887 | 68 | return check_call(["update-initramfs", "-k", version, "-u"]) | ||
3888 | 0 | 69 | ||
3889 | === modified file 'hooks/charmhelpers/core/services/base.py' | |||
3890 | --- hooks/charmhelpers/core/services/base.py 2015-06-10 07:35:12 +0000 | |||
3891 | +++ hooks/charmhelpers/core/services/base.py 2015-12-01 15:05:49 +0000 | |||
3892 | @@ -128,15 +128,18 @@ | |||
3893 | 128 | """ | 128 | """ |
3894 | 129 | Handle the current hook by doing The Right Thing with the registered services. | 129 | Handle the current hook by doing The Right Thing with the registered services. |
3895 | 130 | """ | 130 | """ |
3905 | 131 | hook_name = hookenv.hook_name() | 131 | hookenv._run_atstart() |
3906 | 132 | if hook_name == 'stop': | 132 | try: |
3907 | 133 | self.stop_services() | 133 | hook_name = hookenv.hook_name() |
3908 | 134 | else: | 134 | if hook_name == 'stop': |
3909 | 135 | self.reconfigure_services() | 135 | self.stop_services() |
3910 | 136 | self.provide_data() | 136 | else: |
3911 | 137 | cfg = hookenv.config() | 137 | self.reconfigure_services() |
3912 | 138 | if cfg.implicit_save: | 138 | self.provide_data() |
3913 | 139 | cfg.save() | 139 | except SystemExit as x: |
3914 | 140 | if x.code is None or x.code == 0: | ||
3915 | 141 | hookenv._run_atexit() | ||
3916 | 142 | hookenv._run_atexit() | ||
3917 | 140 | 143 | ||
3918 | 141 | def provide_data(self): | 144 | def provide_data(self): |
3919 | 142 | """ | 145 | """ |
3920 | 143 | 146 | ||
3921 | === modified file 'hooks/charmhelpers/core/services/helpers.py' | |||
3922 | --- hooks/charmhelpers/core/services/helpers.py 2015-06-12 12:22:51 +0000 | |||
3923 | +++ hooks/charmhelpers/core/services/helpers.py 2015-12-01 15:05:49 +0000 | |||
3924 | @@ -16,6 +16,7 @@ | |||
3925 | 16 | 16 | ||
3926 | 17 | import os | 17 | import os |
3927 | 18 | import yaml | 18 | import yaml |
3928 | 19 | |||
3929 | 19 | from charmhelpers.core import hookenv | 20 | from charmhelpers.core import hookenv |
3930 | 20 | from charmhelpers.core import host | 21 | from charmhelpers.core import host |
3931 | 21 | from charmhelpers.core import templating | 22 | from charmhelpers.core import templating |
3932 | @@ -240,42 +241,43 @@ | |||
3933 | 240 | action. | 241 | action. |
3934 | 241 | 242 | ||
3935 | 242 | :param str source: The template source file, relative to | 243 | :param str source: The template source file, relative to |
3937 | 243 | `$CHARM_DIR/templates` | 244 | `$CHARM_DIR/templates` |
3938 | 244 | 245 | ||
3939 | 245 | :param str target: The target to write the rendered template to | 246 | :param str target: The target to write the rendered template to |
3940 | 246 | :param str owner: The owner of the rendered file | 247 | :param str owner: The owner of the rendered file |
3941 | 247 | :param str group: The group of the rendered file | 248 | :param str group: The group of the rendered file |
3942 | 248 | :param int perms: The permissions of the rendered file | 249 | :param int perms: The permissions of the rendered file |
3943 | 249 | :param list template_searchpath: List of paths to search for template in | ||
3944 | 250 | :param partial on_change_action: functools partial to be executed when | 250 | :param partial on_change_action: functools partial to be executed when |
3945 | 251 | rendered file changes | 251 | rendered file changes |
3946 | 252 | :param jinja2 loader template_loader: A jinja2 template loader | ||
3947 | 252 | """ | 253 | """ |
3948 | 253 | def __init__(self, source, target, | 254 | def __init__(self, source, target, |
3949 | 254 | owner='root', group='root', perms=0o444, | 255 | owner='root', group='root', perms=0o444, |
3951 | 255 | template_searchpath=None, on_change_action=None): | 256 | on_change_action=None, template_loader=None): |
3952 | 256 | self.source = source | 257 | self.source = source |
3953 | 257 | self.target = target | 258 | self.target = target |
3954 | 258 | self.owner = owner | 259 | self.owner = owner |
3955 | 259 | self.group = group | 260 | self.group = group |
3956 | 260 | self.perms = perms | 261 | self.perms = perms |
3957 | 261 | self.template_searchpath = template_searchpath | ||
3958 | 262 | self.on_change_action = on_change_action | 262 | self.on_change_action = on_change_action |
3959 | 263 | self.template_loader = template_loader | ||
3960 | 263 | 264 | ||
3961 | 264 | def __call__(self, manager, service_name, event_name): | 265 | def __call__(self, manager, service_name, event_name): |
3962 | 265 | pre_checksum = '' | 266 | pre_checksum = '' |
3963 | 266 | if self.on_change_action and os.path.isfile(self.target): | 267 | if self.on_change_action and os.path.isfile(self.target): |
3964 | 267 | pre_checksum = host.file_hash(self.target) | 268 | pre_checksum = host.file_hash(self.target) |
3965 | 268 | print pre_checksum | ||
3966 | 269 | service = manager.get_service(service_name) | 269 | service = manager.get_service(service_name) |
3967 | 270 | context = {} | 270 | context = {} |
3968 | 271 | for ctx in service.get('required_data', []): | 271 | for ctx in service.get('required_data', []): |
3969 | 272 | context.update(ctx) | 272 | context.update(ctx) |
3970 | 273 | templating.render(self.source, self.target, context, | 273 | templating.render(self.source, self.target, context, |
3971 | 274 | self.owner, self.group, self.perms, | 274 | self.owner, self.group, self.perms, |
3973 | 275 | self.template_searchpath) | 275 | template_loader=self.template_loader) |
3974 | 276 | if self.on_change_action: | 276 | if self.on_change_action: |
3975 | 277 | if pre_checksum == host.file_hash(self.target): | 277 | if pre_checksum == host.file_hash(self.target): |
3977 | 278 | print "No change detected " + self.target | 278 | hookenv.log( |
3978 | 279 | 'No change detected: {}'.format(self.target), | ||
3979 | 280 | hookenv.DEBUG) | ||
3980 | 279 | else: | 281 | else: |
3981 | 280 | self.on_change_action() | 282 | self.on_change_action() |
3982 | 281 | 283 | ||
3983 | 282 | 284 | ||
3984 | === modified file 'hooks/charmhelpers/core/strutils.py' | |||
3985 | --- hooks/charmhelpers/core/strutils.py 2015-06-10 07:35:12 +0000 | |||
3986 | +++ hooks/charmhelpers/core/strutils.py 2015-12-01 15:05:49 +0000 | |||
3987 | @@ -18,6 +18,7 @@ | |||
3988 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3989 | 19 | 19 | ||
3990 | 20 | import six | 20 | import six |
3991 | 21 | import re | ||
3992 | 21 | 22 | ||
3993 | 22 | 23 | ||
3994 | 23 | def bool_from_string(value): | 24 | def bool_from_string(value): |
3995 | @@ -40,3 +41,32 @@ | |||
3996 | 40 | 41 | ||
3997 | 41 | msg = "Unable to interpret string value '%s' as boolean" % (value) | 42 | msg = "Unable to interpret string value '%s' as boolean" % (value) |
3998 | 42 | raise ValueError(msg) | 43 | raise ValueError(msg) |
3999 | 44 | |||
4000 | 45 | |||
4001 | 46 | def bytes_from_string(value): | ||
4002 | 47 | """Interpret human readable string value as bytes. | ||
4003 | 48 | |||
4004 | 49 | Returns int | ||
4005 | 50 | """ | ||
4006 | 51 | BYTE_POWER = { | ||
4007 | 52 | 'K': 1, | ||
4008 | 53 | 'KB': 1, | ||
4009 | 54 | 'M': 2, | ||
4010 | 55 | 'MB': 2, | ||
4011 | 56 | 'G': 3, | ||
4012 | 57 | 'GB': 3, | ||
4013 | 58 | 'T': 4, | ||
4014 | 59 | 'TB': 4, | ||
4015 | 60 | 'P': 5, | ||
4016 | 61 | 'PB': 5, | ||
4017 | 62 | } | ||
4018 | 63 | if isinstance(value, six.string_types): | ||
4019 | 64 | value = six.text_type(value) | ||
4020 | 65 | else: | ||
4021 | 66 | msg = "Unable to interpret non-string value '%s' as boolean" % (value) | ||
4022 | 67 | raise ValueError(msg) | ||
4023 | 68 | matches = re.match("([0-9]+)([a-zA-Z]+)", value) | ||
4024 | 69 | if not matches: | ||
4025 | 70 | msg = "Unable to interpret string value '%s' as bytes" % (value) | ||
4026 | 71 | raise ValueError(msg) | ||
4027 | 72 | return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) | ||
4028 | 43 | 73 | ||
4029 | === modified file 'hooks/charmhelpers/core/templating.py' | |||
4030 | --- hooks/charmhelpers/core/templating.py 2015-06-17 12:23:31 +0000 | |||
4031 | +++ hooks/charmhelpers/core/templating.py 2015-12-01 15:05:49 +0000 | |||
4032 | @@ -21,8 +21,7 @@ | |||
4033 | 21 | 21 | ||
4034 | 22 | 22 | ||
4035 | 23 | def render(source, target, context, owner='root', group='root', | 23 | def render(source, target, context, owner='root', group='root', |
4038 | 24 | perms=0o444, templates_dir=None, encoding='UTF-8', | 24 | perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): |
4037 | 25 | template_searchpath=None): | ||
4039 | 26 | """ | 25 | """ |
4040 | 27 | Render a template. | 26 | Render a template. |
4041 | 28 | 27 | ||
4042 | @@ -41,7 +40,7 @@ | |||
4043 | 41 | this will attempt to use charmhelpers.fetch.apt_install to install it. | 40 | this will attempt to use charmhelpers.fetch.apt_install to install it. |
4044 | 42 | """ | 41 | """ |
4045 | 43 | try: | 42 | try: |
4047 | 44 | from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions | 43 | from jinja2 import FileSystemLoader, Environment, exceptions |
4048 | 45 | except ImportError: | 44 | except ImportError: |
4049 | 46 | try: | 45 | try: |
4050 | 47 | from charmhelpers.fetch import apt_install | 46 | from charmhelpers.fetch import apt_install |
4051 | @@ -51,25 +50,26 @@ | |||
4052 | 51 | level=hookenv.ERROR) | 50 | level=hookenv.ERROR) |
4053 | 52 | raise | 51 | raise |
4054 | 53 | apt_install('python-jinja2', fatal=True) | 52 | apt_install('python-jinja2', fatal=True) |
4056 | 54 | from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions | 53 | from jinja2 import FileSystemLoader, Environment, exceptions |
4057 | 55 | 54 | ||
4063 | 56 | if template_searchpath: | 55 | if template_loader: |
4064 | 57 | fs_loaders = [] | 56 | template_env = Environment(loader=template_loader) |
4060 | 58 | for tmpl_dir in template_searchpath: | ||
4061 | 59 | fs_loaders.append(FileSystemLoader(tmpl_dir)) | ||
4062 | 60 | loader = ChoiceLoader(fs_loaders) | ||
4065 | 61 | else: | 57 | else: |
4066 | 62 | if templates_dir is None: | 58 | if templates_dir is None: |
4067 | 63 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | 59 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') |
4069 | 64 | loader = Environment(loader=FileSystemLoader(templates_dir)) | 60 | template_env = Environment(loader=FileSystemLoader(templates_dir)) |
4070 | 65 | try: | 61 | try: |
4071 | 66 | source = source | 62 | source = source |
4073 | 67 | template = loader.get_template(source) | 63 | template = template_env.get_template(source) |
4074 | 68 | except exceptions.TemplateNotFound as e: | 64 | except exceptions.TemplateNotFound as e: |
4075 | 69 | hookenv.log('Could not load template %s from %s.' % | 65 | hookenv.log('Could not load template %s from %s.' % |
4076 | 70 | (source, templates_dir), | 66 | (source, templates_dir), |
4077 | 71 | level=hookenv.ERROR) | 67 | level=hookenv.ERROR) |
4078 | 72 | raise e | 68 | raise e |
4079 | 73 | content = template.render(context) | 69 | content = template.render(context) |
4081 | 74 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) | 70 | target_dir = os.path.dirname(target) |
4082 | 71 | if not os.path.exists(target_dir): | ||
4083 | 72 | # This is a terrible default directory permission, as the file | ||
4084 | 73 | # or its siblings will often contain secrets. | ||
4085 | 74 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) | ||
4086 | 75 | host.write_file(target, content.encode(encoding), owner, group, perms) | 75 | host.write_file(target, content.encode(encoding), owner, group, perms) |
4087 | 76 | 76 | ||
4088 | === modified file 'hooks/charmhelpers/core/unitdata.py' | |||
4089 | --- hooks/charmhelpers/core/unitdata.py 2015-06-10 07:35:12 +0000 | |||
4090 | +++ hooks/charmhelpers/core/unitdata.py 2015-12-01 15:05:49 +0000 | |||
4091 | @@ -152,6 +152,7 @@ | |||
4092 | 152 | import collections | 152 | import collections |
4093 | 153 | import contextlib | 153 | import contextlib |
4094 | 154 | import datetime | 154 | import datetime |
4095 | 155 | import itertools | ||
4096 | 155 | import json | 156 | import json |
4097 | 156 | import os | 157 | import os |
4098 | 157 | import pprint | 158 | import pprint |
4099 | @@ -164,8 +165,7 @@ | |||
4100 | 164 | class Storage(object): | 165 | class Storage(object): |
4101 | 165 | """Simple key value database for local unit state within charms. | 166 | """Simple key value database for local unit state within charms. |
4102 | 166 | 167 | ||
4105 | 167 | Modifications are automatically committed at hook exit. That's | 168 | Modifications are not persisted unless :meth:`flush` is called. |
4104 | 168 | currently regardless of exit code. | ||
4106 | 169 | 169 | ||
4107 | 170 | To support dicts, lists, integer, floats, and booleans values | 170 | To support dicts, lists, integer, floats, and booleans values |
4108 | 171 | are automatically json encoded/decoded. | 171 | are automatically json encoded/decoded. |
4109 | @@ -173,8 +173,11 @@ | |||
4110 | 173 | def __init__(self, path=None): | 173 | def __init__(self, path=None): |
4111 | 174 | self.db_path = path | 174 | self.db_path = path |
4112 | 175 | if path is None: | 175 | if path is None: |
4115 | 176 | self.db_path = os.path.join( | 176 | if 'UNIT_STATE_DB' in os.environ: |
4116 | 177 | os.environ.get('CHARM_DIR', ''), '.unit-state.db') | 177 | self.db_path = os.environ['UNIT_STATE_DB'] |
4117 | 178 | else: | ||
4118 | 179 | self.db_path = os.path.join( | ||
4119 | 180 | os.environ.get('CHARM_DIR', ''), '.unit-state.db') | ||
4120 | 178 | self.conn = sqlite3.connect('%s' % self.db_path) | 181 | self.conn = sqlite3.connect('%s' % self.db_path) |
4121 | 179 | self.cursor = self.conn.cursor() | 182 | self.cursor = self.conn.cursor() |
4122 | 180 | self.revision = None | 183 | self.revision = None |
4123 | @@ -189,15 +192,8 @@ | |||
4124 | 189 | self.conn.close() | 192 | self.conn.close() |
4125 | 190 | self._closed = True | 193 | self._closed = True |
4126 | 191 | 194 | ||
4127 | 192 | def _scoped_query(self, stmt, params=None): | ||
4128 | 193 | if params is None: | ||
4129 | 194 | params = [] | ||
4130 | 195 | return stmt, params | ||
4131 | 196 | |||
4132 | 197 | def get(self, key, default=None, record=False): | 195 | def get(self, key, default=None, record=False): |
4136 | 198 | self.cursor.execute( | 196 | self.cursor.execute('select data from kv where key=?', [key]) |
4134 | 199 | *self._scoped_query( | ||
4135 | 200 | 'select data from kv where key=?', [key])) | ||
4137 | 201 | result = self.cursor.fetchone() | 197 | result = self.cursor.fetchone() |
4138 | 202 | if not result: | 198 | if not result: |
4139 | 203 | return default | 199 | return default |
4140 | @@ -206,33 +202,81 @@ | |||
4141 | 206 | return json.loads(result[0]) | 202 | return json.loads(result[0]) |
4142 | 207 | 203 | ||
4143 | 208 | def getrange(self, key_prefix, strip=False): | 204 | def getrange(self, key_prefix, strip=False): |
4146 | 209 | stmt = "select key, data from kv where key like '%s%%'" % key_prefix | 205 | """ |
4147 | 210 | self.cursor.execute(*self._scoped_query(stmt)) | 206 | Get a range of keys starting with a common prefix as a mapping of |
4148 | 207 | keys to values. | ||
4149 | 208 | |||
4150 | 209 | :param str key_prefix: Common prefix among all keys | ||
4151 | 210 | :param bool strip: Optionally strip the common prefix from the key | ||
4152 | 211 | names in the returned dict | ||
4153 | 212 | :return dict: A (possibly empty) dict of key-value mappings | ||
4154 | 213 | """ | ||
4155 | 214 | self.cursor.execute("select key, data from kv where key like ?", | ||
4156 | 215 | ['%s%%' % key_prefix]) | ||
4157 | 211 | result = self.cursor.fetchall() | 216 | result = self.cursor.fetchall() |
4158 | 212 | 217 | ||
4159 | 213 | if not result: | 218 | if not result: |
4161 | 214 | return None | 219 | return {} |
4162 | 215 | if not strip: | 220 | if not strip: |
4163 | 216 | key_prefix = '' | 221 | key_prefix = '' |
4164 | 217 | return dict([ | 222 | return dict([ |
4165 | 218 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) | 223 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) |
4166 | 219 | 224 | ||
4167 | 220 | def update(self, mapping, prefix=""): | 225 | def update(self, mapping, prefix=""): |
4168 | 226 | """ | ||
4169 | 227 | Set the values of multiple keys at once. | ||
4170 | 228 | |||
4171 | 229 | :param dict mapping: Mapping of keys to values | ||
4172 | 230 | :param str prefix: Optional prefix to apply to all keys in `mapping` | ||
4173 | 231 | before setting | ||
4174 | 232 | """ | ||
4175 | 221 | for k, v in mapping.items(): | 233 | for k, v in mapping.items(): |
4176 | 222 | self.set("%s%s" % (prefix, k), v) | 234 | self.set("%s%s" % (prefix, k), v) |
4177 | 223 | 235 | ||
4178 | 224 | def unset(self, key): | 236 | def unset(self, key): |
4179 | 237 | """ | ||
4180 | 238 | Remove a key from the database entirely. | ||
4181 | 239 | """ | ||
4182 | 225 | self.cursor.execute('delete from kv where key=?', [key]) | 240 | self.cursor.execute('delete from kv where key=?', [key]) |
4183 | 226 | if self.revision and self.cursor.rowcount: | 241 | if self.revision and self.cursor.rowcount: |
4184 | 227 | self.cursor.execute( | 242 | self.cursor.execute( |
4185 | 228 | 'insert into kv_revisions values (?, ?, ?)', | 243 | 'insert into kv_revisions values (?, ?, ?)', |
4186 | 229 | [key, self.revision, json.dumps('DELETED')]) | 244 | [key, self.revision, json.dumps('DELETED')]) |
4187 | 230 | 245 | ||
4188 | 246 | def unsetrange(self, keys=None, prefix=""): | ||
4189 | 247 | """ | ||
4190 | 248 | Remove a range of keys starting with a common prefix, from the database | ||
4191 | 249 | entirely. | ||
4192 | 250 | |||
4193 | 251 | :param list keys: List of keys to remove. | ||
4194 | 252 | :param str prefix: Optional prefix to apply to all keys in ``keys`` | ||
4195 | 253 | before removing. | ||
4196 | 254 | """ | ||
4197 | 255 | if keys is not None: | ||
4198 | 256 | keys = ['%s%s' % (prefix, key) for key in keys] | ||
4199 | 257 | self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) | ||
4200 | 258 | if self.revision and self.cursor.rowcount: | ||
4201 | 259 | self.cursor.execute( | ||
4202 | 260 | 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), | ||
4203 | 261 | list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) | ||
4204 | 262 | else: | ||
4205 | 263 | self.cursor.execute('delete from kv where key like ?', | ||
4206 | 264 | ['%s%%' % prefix]) | ||
4207 | 265 | if self.revision and self.cursor.rowcount: | ||
4208 | 266 | self.cursor.execute( | ||
4209 | 267 | 'insert into kv_revisions values (?, ?, ?)', | ||
4210 | 268 | ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) | ||
4211 | 269 | |||
4212 | 231 | def set(self, key, value): | 270 | def set(self, key, value): |
4213 | 271 | """ | ||
4214 | 272 | Set a value in the database. | ||
4215 | 273 | |||
4216 | 274 | :param str key: Key to set the value for | ||
4217 | 275 | :param value: Any JSON-serializable value to be set | ||
4218 | 276 | """ | ||
4219 | 232 | serialized = json.dumps(value) | 277 | serialized = json.dumps(value) |
4220 | 233 | 278 | ||
4223 | 234 | self.cursor.execute( | 279 | self.cursor.execute('select data from kv where key=?', [key]) |
4222 | 235 | 'select data from kv where key=?', [key]) | ||
4224 | 236 | exists = self.cursor.fetchone() | 280 | exists = self.cursor.fetchone() |
4225 | 237 | 281 | ||
4226 | 238 | # Skip mutations to the same value | 282 | # Skip mutations to the same value |
4227 | 239 | 283 | ||
4228 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
4229 | --- hooks/charmhelpers/fetch/__init__.py 2015-06-10 07:35:12 +0000 | |||
4230 | +++ hooks/charmhelpers/fetch/__init__.py 2015-12-01 15:05:49 +0000 | |||
4231 | @@ -90,6 +90,14 @@ | |||
4232 | 90 | 'kilo/proposed': 'trusty-proposed/kilo', | 90 | 'kilo/proposed': 'trusty-proposed/kilo', |
4233 | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
4234 | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
4235 | 93 | # Liberty | ||
4236 | 94 | 'liberty': 'trusty-updates/liberty', | ||
4237 | 95 | 'trusty-liberty': 'trusty-updates/liberty', | ||
4238 | 96 | 'trusty-liberty/updates': 'trusty-updates/liberty', | ||
4239 | 97 | 'trusty-updates/liberty': 'trusty-updates/liberty', | ||
4240 | 98 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
4241 | 99 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', | ||
4242 | 100 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | ||
4243 | 93 | } | 101 | } |
4244 | 94 | 102 | ||
4245 | 95 | # The order of this list is very important. Handlers should be listed in from | 103 | # The order of this list is very important. Handlers should be listed in from |
4246 | @@ -215,19 +223,27 @@ | |||
4247 | 215 | _run_apt_command(cmd, fatal) | 223 | _run_apt_command(cmd, fatal) |
4248 | 216 | 224 | ||
4249 | 217 | 225 | ||
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark"""
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    # Accept either a single package name or an iterable of names.
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)

    # fatal=True raises CalledProcessError on failure; otherwise the
    # exit status is ignored (best-effort marking).
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd, universal_newlines=True)
4263 | 239 | |||
4264 | 240 | |||
def apt_hold(packages, fatal=False):
    """Hold one or more packages (blocks upgrade/removal via apt-mark)."""
    return apt_mark(packages, 'hold', fatal=fatal)


def apt_unhold(packages, fatal=False):
    """Remove an apt-mark hold from one or more packages."""
    return apt_mark(packages, 'unhold', fatal=fatal)
4283 | 231 | 247 | ||
4284 | 232 | 248 | ||
4285 | 233 | def add_source(source, key=None): | 249 | def add_source(source, key=None): |
4286 | @@ -370,8 +386,9 @@ | |||
4287 | 370 | for handler in handlers: | 386 | for handler in handlers: |
4288 | 371 | try: | 387 | try: |
4289 | 372 | installed_to = handler.install(source, *args, **kwargs) | 388 | installed_to = handler.install(source, *args, **kwargs) |
4292 | 373 | except UnhandledSource: | 389 | except UnhandledSource as e: |
4293 | 374 | pass | 390 | log('Install source attempt unsuccessful: {}'.format(e), |
4294 | 391 | level='WARNING') | ||
4295 | 375 | if not installed_to: | 392 | if not installed_to: |
4296 | 376 | raise UnhandledSource("No handler found for source {}".format(source)) | 393 | raise UnhandledSource("No handler found for source {}".format(source)) |
4297 | 377 | return installed_to | 394 | return installed_to |
4298 | 378 | 395 | ||
4299 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
4300 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-06-10 07:35:12 +0000 | |||
4301 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-12-01 15:05:49 +0000 | |||
4302 | @@ -77,6 +77,8 @@ | |||
4303 | 77 | def can_handle(self, source): | 77 | def can_handle(self, source): |
4304 | 78 | url_parts = self.parse_url(source) | 78 | url_parts = self.parse_url(source) |
4305 | 79 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): | 79 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
4306 | 80 | # XXX: Why is this returning a boolean and a string? It's | ||
4307 | 81 | # doomed to fail since "bool(can_handle('foo://'))" will be True. | ||
4308 | 80 | return "Wrong source type" | 82 | return "Wrong source type" |
4309 | 81 | if get_archive_handler(self.base_url(source)): | 83 | if get_archive_handler(self.base_url(source)): |
4310 | 82 | return True | 84 | return True |
4311 | @@ -155,7 +157,11 @@ | |||
4312 | 155 | else: | 157 | else: |
4313 | 156 | algorithms = hashlib.algorithms_available | 158 | algorithms = hashlib.algorithms_available |
4314 | 157 | if key in algorithms: | 159 | if key in algorithms: |
4316 | 158 | check_hash(dld_file, value, key) | 160 | if len(value) != 1: |
4317 | 161 | raise TypeError( | ||
4318 | 162 | "Expected 1 hash value, not %d" % len(value)) | ||
4319 | 163 | expected = value[0] | ||
4320 | 164 | check_hash(dld_file, expected, key) | ||
4321 | 159 | if checksum: | 165 | if checksum: |
4322 | 160 | check_hash(dld_file, checksum, hash_type) | 166 | check_hash(dld_file, checksum, hash_type) |
4323 | 161 | return extract(dld_file, dest) | 167 | return extract(dld_file, dest) |
4324 | 162 | 168 | ||
4325 | === modified file 'hooks/charmhelpers/fetch/giturl.py' | |||
4326 | --- hooks/charmhelpers/fetch/giturl.py 2015-06-10 07:35:12 +0000 | |||
4327 | +++ hooks/charmhelpers/fetch/giturl.py 2015-12-01 15:05:49 +0000 | |||
4328 | @@ -67,7 +67,7 @@ | |||
4329 | 67 | try: | 67 | try: |
4330 | 68 | self.clone(source, dest_dir, branch, depth) | 68 | self.clone(source, dest_dir, branch, depth) |
4331 | 69 | except GitCommandError as e: | 69 | except GitCommandError as e: |
4333 | 70 | raise UnhandledSource(e.message) | 70 | raise UnhandledSource(e) |
4334 | 71 | except OSError as e: | 71 | except OSError as e: |
4335 | 72 | raise UnhandledSource(e.strerror) | 72 | raise UnhandledSource(e.strerror) |
4336 | 73 | return dest_dir | 73 | return dest_dir |
4337 | 74 | 74 | ||
4338 | === modified file 'hooks/services.py' | |||
4339 | --- hooks/services.py 2015-09-14 16:44:47 +0000 | |||
4340 | +++ hooks/services.py 2015-12-01 15:05:49 +0000 | |||
4341 | @@ -2,6 +2,9 @@ | |||
4342 | 2 | from charmhelpers.core import hookenv | 2 | from charmhelpers.core import hookenv |
4343 | 3 | from charmhelpers.core.services.base import ServiceManager | 3 | from charmhelpers.core.services.base import ServiceManager |
4344 | 4 | from charmhelpers.core.services import helpers | 4 | from charmhelpers.core.services import helpers |
4345 | 5 | from charmhelpers.contrib.openstack.templating import get_loader | ||
4346 | 6 | from charmhelpers.core.services.base import service_restart | ||
4347 | 7 | from charmhelpers.contrib.openstack.utils import os_release | ||
4348 | 5 | 8 | ||
4349 | 6 | import vpp_utils | 9 | import vpp_utils |
4350 | 7 | import vpp_data | 10 | import vpp_data |
4351 | @@ -9,6 +12,7 @@ | |||
4352 | 9 | 12 | ||
4353 | 10 | def manage(): | 13 | def manage(): |
4354 | 11 | config = hookenv.config() | 14 | config = hookenv.config() |
4355 | 15 | release = os_release('neutron-common') | ||
4356 | 12 | manager = ServiceManager([ | 16 | manager = ServiceManager([ |
4357 | 13 | # Actions which have no prerequisites and can be rerun | 17 | # Actions which have no prerequisites and can be rerun |
4358 | 14 | { | 18 | { |
4359 | @@ -18,6 +22,7 @@ | |||
4360 | 18 | ], | 22 | ], |
4361 | 19 | 'provided_data': [ | 23 | 'provided_data': [ |
4362 | 20 | vpp_data.NeutronPluginRelation(), | 24 | vpp_data.NeutronPluginRelation(), |
4363 | 25 | vpp_data.AMQPRelation(), | ||
4364 | 21 | ], | 26 | ], |
4365 | 22 | }, | 27 | }, |
4366 | 23 | # Install hugepages and components reliant on huge pages | 28 | # Install hugepages and components reliant on huge pages |
4367 | @@ -34,12 +39,14 @@ | |||
4368 | 34 | { | 39 | { |
4369 | 35 | 'service': 'vpp-compute-render', | 40 | 'service': 'vpp-compute-render', |
4370 | 36 | 'required_data': [ | 41 | 'required_data': [ |
4371 | 42 | vpp_data.AMQPRelation(), | ||
4372 | 37 | vpp_data.SystemResources(), | 43 | vpp_data.SystemResources(), |
4373 | 38 | vpp_data.NeutronPluginRelation(), | 44 | vpp_data.NeutronPluginRelation(), |
4374 | 39 | vpp_data.ODLControllerRelation(), | 45 | vpp_data.ODLControllerRelation(), |
4375 | 40 | config, | 46 | config, |
4376 | 41 | vpp_data.ConfigTranslation(), | 47 | vpp_data.ConfigTranslation(), |
4377 | 42 | vpp_data.PCIInfo(), | 48 | vpp_data.PCIInfo(), |
4378 | 49 | vpp_data.NeutronPluginAPIRelation(), | ||
4379 | 43 | ], | 50 | ], |
4380 | 44 | 'data_ready': [ | 51 | 'data_ready': [ |
4381 | 45 | vpp_utils.bind_orphaned_net_interfaces, | 52 | vpp_utils.bind_orphaned_net_interfaces, |
4382 | @@ -53,6 +60,27 @@ | |||
4383 | 53 | target='/etc/apparmor.d/libvirt/TEMPLATE.qemu', | 60 | target='/etc/apparmor.d/libvirt/TEMPLATE.qemu', |
4384 | 54 | on_change_action=(partial(vpp_utils.reload_apparmor)), | 61 | on_change_action=(partial(vpp_utils.reload_apparmor)), |
4385 | 55 | ), | 62 | ), |
4386 | 63 | helpers.render_template( | ||
4387 | 64 | source='neutron.conf', | ||
4388 | 65 | template_loader=get_loader('templates/', release), | ||
4389 | 66 | target='/etc/neutron/neutron.conf', | ||
4390 | 67 | on_change_action=(partial(service_restart, | ||
4391 | 68 | 'neutron-dhcp-agent')), | ||
4392 | 69 | ), | ||
4393 | 70 | helpers.render_template( | ||
4394 | 71 | source='dhcp_agent.ini', | ||
4395 | 72 | template_loader=get_loader('templates/', release), | ||
4396 | 73 | target='/etc/neutron/dhcp_agent.ini', | ||
4397 | 74 | on_change_action=(partial(service_restart, | ||
4398 | 75 | 'neutron-dhcp-agent')), | ||
4399 | 76 | ), | ||
4400 | 77 | helpers.render_template( | ||
4401 | 78 | source='metadata_agent.ini', | ||
4402 | 79 | template_loader=get_loader('templates/', release), | ||
4403 | 80 | target='/etc/neutron/metadata_agent.ini', | ||
4404 | 81 | on_change_action=(partial(service_restart, | ||
4405 | 82 | 'neutron-metadata-agent')), | ||
4406 | 83 | ), | ||
4407 | 56 | vpp_utils.odl_node_registration, | 84 | vpp_utils.odl_node_registration, |
4408 | 57 | vpp_utils.odl_register_macs, | 85 | vpp_utils.odl_register_macs, |
4409 | 58 | vpp_utils.bind_orphaned_net_interfaces, | 86 | vpp_utils.bind_orphaned_net_interfaces, |
4410 | 59 | 87 | ||
4411 | === modified file 'hooks/vpp_data.py' | |||
4412 | --- hooks/vpp_data.py 2015-08-17 06:56:01 +0000 | |||
4413 | +++ hooks/vpp_data.py 2015-12-01 15:05:49 +0000 | |||
4414 | @@ -2,16 +2,47 @@ | |||
4415 | 2 | import glob | 2 | import glob |
4416 | 3 | import os | 3 | import os |
4417 | 4 | import json | 4 | import json |
4418 | 5 | from charmhelpers.contrib.openstack import context | ||
4419 | 5 | from charmhelpers.core.services import helpers | 6 | from charmhelpers.core.services import helpers |
4420 | 6 | from charmhelpers.core.hookenv import( | 7 | from charmhelpers.core.hookenv import( |
4421 | 7 | config, | 8 | config, |
4422 | 8 | log, | 9 | log, |
4423 | 9 | ) | 10 | ) |
4424 | 11 | import uuid | ||
4425 | 10 | 12 | ||
4426 | 11 | VLAN = 'vlan' | 13 | VLAN = 'vlan' |
4427 | 12 | VXLAN = 'vxlan' | 14 | VXLAN = 'vxlan' |
4428 | 13 | GRE = 'gre' | 15 | GRE = 'gre' |
4429 | 14 | OVERLAY_NET_TYPES = [VXLAN, GRE] | 16 | OVERLAY_NET_TYPES = [VXLAN, GRE] |
4430 | 17 | NEUTRON_CONF_DIR = "/etc/neutron" | ||
4431 | 18 | SHARED_SECRET_FILE = "/etc/neutron/secret.txt" | ||
4432 | 19 | |||
class NeutronPluginAPIRelation(helpers.RelationContext):
    """Relation context exposing neutron-api's keystone/service endpoint
    details (host, port, protocol, admin credentials, region) for use in
    templates such as metadata_agent.ini.
    """
    name = 'neutron-plugin-api'
    interface = 'neutron-plugin-api'

    def get_first_data(self):
        """Return the first related unit's data dict, or {} if none."""
        units = self.get('neutron-plugin-api')
        if units:
            return units[0]
        return {}

    def get_data(self):
        """Populate top-level keys from the first neutron-api unit."""
        super(NeutronPluginAPIRelation, self).get_data()
        api_server = self.get_first_data()
        self['service_host'] = api_server.get('service_host')
        self['service_protocol'] = api_server.get('service_protocol', 'http')
        self['service_port'] = api_server.get('service_port')
        self['admin_tenant_name'] = api_server.get('service_tenant')
        self['admin_user'] = api_server.get('service_username')
        self['admin_password'] = api_server.get('service_password')
        self['region'] = api_server.get('region')

    def is_ready(self):
        # The API side publishes credentials last, so the presence of the
        # password signals a complete data set.
        return 'service_password' in self.get_first_data()
4459 | 15 | 46 | ||
4460 | 16 | 47 | ||
4461 | 17 | class ODLControllerRelation(helpers.RelationContext): | 48 | class ODLControllerRelation(helpers.RelationContext): |
4462 | @@ -47,6 +78,22 @@ | |||
4463 | 47 | name = 'neutron-plugin' | 78 | name = 'neutron-plugin' |
4464 | 48 | interface = 'neutron-plugin-api-subordinate' | 79 | interface = 'neutron-plugin-api-subordinate' |
4465 | 49 | 80 | ||
    def __init__(self, *args, **kwargs):
        super(NeutronPluginRelation, self).__init__(*args, **kwargs)
        # Resolve (or lazily create) the metadata proxy shared secret at
        # construction time so it is available both locally and to
        # provide_data(), which publishes it on the relation.
        self['shared_secret'] = self.get_metadata_secret()
4469 | 84 | |||
4470 | 85 | def get_metadata_secret(self): | ||
4471 | 86 | secret = None | ||
4472 | 87 | if os.path.exists(os.path.dirname(SHARED_SECRET_FILE)): | ||
4473 | 88 | if os.path.exists(SHARED_SECRET_FILE): | ||
4474 | 89 | with open(SHARED_SECRET_FILE, 'r') as secret_file: | ||
4475 | 90 | secret = secret_file.read().strip() | ||
4476 | 91 | else: | ||
4477 | 92 | secret = str(uuid.uuid4()) | ||
4478 | 93 | with open(SHARED_SECRET_FILE, 'w') as secret_file: | ||
4479 | 94 | secret_file.write(secret) | ||
4480 | 95 | return secret | ||
4481 | 96 | |||
4482 | 50 | def provide_data(self): | 97 | def provide_data(self): |
4483 | 51 | # Add sections and tuples to insert values into neutron-server's | 98 | # Add sections and tuples to insert values into neutron-server's |
4484 | 52 | # neutron.conf e.g. | 99 | # neutron.conf e.g. |
4485 | @@ -83,6 +130,7 @@ | |||
4486 | 83 | relation_info = { | 130 | relation_info = { |
4487 | 84 | 'neutron-plugin': 'odl', | 131 | 'neutron-plugin': 'odl', |
4488 | 85 | 'subordinate_configuration': json.dumps(principle_config), | 132 | 'subordinate_configuration': json.dumps(principle_config), |
4489 | 133 | 'metadata-shared-secret': self['shared_secret'], | ||
4490 | 86 | } | 134 | } |
4491 | 87 | return relation_info | 135 | return relation_info |
4492 | 88 | 136 | ||
4493 | @@ -179,3 +227,30 @@ | |||
4494 | 179 | 'net': tmp_dict.get('net'), | 227 | 'net': tmp_dict.get('net'), |
4495 | 180 | }] | 228 | }] |
4496 | 181 | return mac_net_config | 229 | return mac_net_config |
4497 | 230 | |||
4498 | 231 | |||
class AMQPRelation(helpers.RelationContext):
    """Relation context for the amqp (rabbitmq) relation.

    Wraps charmhelpers' stock AMQPContext so templates receive the
    standard rabbitmq_* keys (host/hosts, user, vhost, password, SSL
    settings) merged into this dict.
    """
    name = 'amqp'
    interface = 'rabbitmq'

    def __init__(self, *args, **kwargs):
        # Evaluate the stock AMQP context once; get_data() merges its
        # keys into this dict for template rendering.
        self.ctxt = context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR)()
        super(AMQPRelation, self).__init__(*args, **kwargs)

    def get_data(self):
        super(AMQPRelation, self).get_data()
        # NOTE: dict.iteritems() does not exist on Python 3; items()
        # keeps this module py2/py3 compatible like the rest of the
        # charm-helpers tree.
        for key, value in self.ctxt.items():
            self[key] = value

    def provide_data(self):
        """Request broker credentials for the configured user/vhost."""
        relation_info = {
            'username': config('rabbit-user'),
            'vhost': config('rabbit-vhost'),
        }
        return relation_info

    def is_ready(self):
        # Usable once the broker has handed back a password.
        return bool(self.ctxt.get('rabbitmq_password'))
4524 | 182 | 257 | ||
4525 | === modified file 'hooks/vpp_utils.py' | |||
4526 | --- hooks/vpp_utils.py 2015-09-14 16:44:47 +0000 | |||
4527 | +++ hooks/vpp_utils.py 2015-12-01 15:05:49 +0000 | |||
4528 | @@ -24,7 +24,8 @@ | |||
4529 | 24 | ODL_MOUNT_PATH = ('/restconf/config/opendaylight-inventory:nodes/node/' | 24 | ODL_MOUNT_PATH = ('/restconf/config/opendaylight-inventory:nodes/node/' |
4530 | 25 | 'controller-config/yang-ext:mount/config:modules') | 25 | 'controller-config/yang-ext:mount/config:modules') |
4531 | 26 | 26 | ||
4533 | 27 | PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios'] | 27 | PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios', |
4534 | 28 | 'neutron-dhcp-agent'] | ||
4535 | 28 | 29 | ||
4536 | 29 | 30 | ||
4537 | 30 | def install_packages(servicename): | 31 | def install_packages(servicename): |
4538 | 31 | 32 | ||
4539 | === modified file 'metadata.yaml' | |||
4540 | --- metadata.yaml 2015-07-21 15:18:07 +0000 | |||
4541 | +++ metadata.yaml 2015-12-01 15:05:49 +0000 | |||
4542 | @@ -17,3 +17,7 @@ | |||
4543 | 17 | container: | 17 | container: |
4544 | 18 | interface: juju-info | 18 | interface: juju-info |
4545 | 19 | scope: container | 19 | scope: container |
4546 | 20 | amqp: | ||
4547 | 21 | interface: rabbitmq | ||
4548 | 22 | neutron-plugin-api: | ||
4549 | 23 | interface: neutron-plugin-api | ||
4550 | 20 | 24 | ||
4551 | === added directory 'templates/icehouse' | |||
4552 | === added file 'templates/icehouse/dhcp_agent.ini' | |||
4553 | --- templates/icehouse/dhcp_agent.ini 1970-01-01 00:00:00 +0000 | |||
4554 | +++ templates/icehouse/dhcp_agent.ini 2015-12-01 15:05:49 +0000 | |||
4555 | @@ -0,0 +1,13 @@ | |||
4556 | 1 | ############################################################################### | ||
4557 | 2 | # [ WARNING ] | ||
4558 | 3 | # Configuration file maintained by Juju. Local changes may be overwritten. | ||
4559 | 4 | ############################################################################### | ||
4560 | 5 | [DEFAULT] | ||
4561 | 6 | debug = {{ debug }} | ||
4562 | 7 | resync_interval = 5 | ||
4563 | 8 | interface_driver = neutron.agent.linux.interface.NSNullDriver | ||
4564 | 9 | dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq | ||
4565 | 10 | use_namespaces = True | ||
4566 | 11 | enable_isolated_metadata = True | ||
4567 | 12 | enable_metadata_network = True | ||
4568 | 13 | |||
4569 | 0 | 14 | ||
4570 | === added file 'templates/icehouse/metadata_agent.ini' | |||
4571 | --- templates/icehouse/metadata_agent.ini 1970-01-01 00:00:00 +0000 | |||
4572 | +++ templates/icehouse/metadata_agent.ini 2015-12-01 15:05:49 +0000 | |||
4573 | @@ -0,0 +1,15 @@ | |||
4574 | 1 | ############################################################################### | ||
4575 | 2 | # [ WARNING ] | ||
4576 | 3 | # Configuration file maintained by Juju. Local changes may be overwritten. | ||
4577 | 4 | ############################################################################### | ||
4578 | 5 | # Metadata service seems to cache the neutron api url from keystone, so trigger a restart when it changes. | ||
4579 | 6 | |||
4580 | 7 | [DEFAULT] | ||
4581 | 8 | auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0 | ||
4582 | 9 | auth_region = {{ region }} | ||
4583 | 10 | admin_tenant_name = {{ admin_tenant_name }} | ||
4584 | 11 | admin_user = {{ admin_user }} | ||
4585 | 12 | admin_password = {{ admin_password }} | ||
4586 | 13 | nova_metadata_port = 8775 | ||
4587 | 14 | metadata_proxy_shared_secret = {{ shared_secret }} | ||
4588 | 15 | cache_url = memory://?default_ttl=5 | ||
4589 | 0 | 16 | ||
4590 | === added file 'templates/icehouse/neutron.conf' | |||
4591 | --- templates/icehouse/neutron.conf 1970-01-01 00:00:00 +0000 | |||
4592 | +++ templates/icehouse/neutron.conf 2015-12-01 15:05:49 +0000 | |||
4593 | @@ -0,0 +1,31 @@ | |||
4594 | 1 | # icehouse | ||
4595 | 2 | ############################################################################### | ||
4596 | 3 | # [ WARNING ] | ||
4597 | 4 | # Configuration file maintained by Juju. Local changes may be overwritten. | ||
4598 | 5 | # Config managed by cisco-vpp charm | ||
4599 | 6 | ############################################################################### | ||
4600 | 7 | [DEFAULT] | ||
4601 | 8 | verbose = {{ verbose }} | ||
4602 | 9 | debug = {{ debug }} | ||
4603 | 10 | state_path = /var/lib/neutron | ||
4604 | 11 | lock_path = $state_path/lock | ||
4605 | 12 | bind_host = 0.0.0.0 | ||
4606 | 13 | bind_port = 9696 | ||
4607 | 14 | |||
4608 | 15 | api_paste_config = /etc/neutron/api-paste.ini | ||
4609 | 16 | auth_strategy = keystone | ||
4610 | 17 | default_notification_level = INFO | ||
4611 | 18 | notification_topics = notifications | ||
4612 | 19 | |||
4613 | 20 | {% include "parts/rabbitmq" %} | ||
4614 | 21 | |||
4615 | 22 | [QUOTAS] | ||
4616 | 23 | |||
4617 | 24 | [DEFAULT_SERVICETYPE] | ||
4618 | 25 | |||
4619 | 26 | [AGENT] | ||
4620 | 27 | root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf | ||
4621 | 28 | |||
4622 | 29 | [keystone_authtoken] | ||
4623 | 30 | signing_dir = /var/lib/neutron/keystone-signing | ||
4624 | 31 | |||
4625 | 0 | 32 | ||
4626 | === added directory 'templates/parts' | |||
4627 | === added file 'templates/parts/rabbitmq' | |||
4628 | --- templates/parts/rabbitmq 1970-01-01 00:00:00 +0000 | |||
4629 | +++ templates/parts/rabbitmq 2015-12-01 15:05:49 +0000 | |||
4630 | @@ -0,0 +1,21 @@ | |||
4631 | 1 | {% if rabbitmq_host or rabbitmq_hosts -%} | ||
4632 | 2 | rabbit_userid = {{ rabbitmq_user }} | ||
4633 | 3 | rabbit_virtual_host = {{ rabbitmq_virtual_host }} | ||
4634 | 4 | rabbit_password = {{ rabbitmq_password }} | ||
4635 | 5 | {% if rabbitmq_hosts -%} | ||
4636 | 6 | rabbit_hosts = {{ rabbitmq_hosts }} | ||
4637 | 7 | {% if rabbitmq_ha_queues -%} | ||
4638 | 8 | rabbit_ha_queues = True | ||
4639 | 9 | rabbit_durable_queues = False | ||
4640 | 10 | {% endif -%} | ||
4641 | 11 | {% else -%} | ||
4642 | 12 | rabbit_host = {{ rabbitmq_host }} | ||
4643 | 13 | {% endif -%} | ||
4644 | 14 | {% if rabbit_ssl_port -%} | ||
4645 | 15 | rabbit_use_ssl = True | ||
4646 | 16 | rabbit_port = {{ rabbit_ssl_port }} | ||
4647 | 17 | {% if rabbit_ssl_ca -%} | ||
4648 | 18 | kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} | ||
4649 | 19 | {% endif -%} | ||
4650 | 20 | {% endif -%} | ||
4651 | 21 | {% endif -%} | ||
4652 | 0 | \ No newline at end of file | 22 | \ No newline at end of file |
4653 | 1 | 23 | ||
4654 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
4655 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-06-16 07:53:15 +0000 | |||
4656 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-12-01 15:05:49 +0000 | |||
4657 | @@ -14,14 +14,21 @@ | |||
4658 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
4659 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4660 | 16 | 16 | ||
4661 | 17 | import ConfigParser | ||
4662 | 18 | import io | 17 | import io |
4663 | 19 | import logging | 18 | import logging |
4664 | 19 | import os | ||
4665 | 20 | import re | 20 | import re |
4666 | 21 | import sys | 21 | import sys |
4667 | 22 | import time | 22 | import time |
4668 | 23 | 23 | ||
4669 | 24 | import amulet | ||
4670 | 25 | import distro_info | ||
4671 | 24 | import six | 26 | import six |
4672 | 27 | from six.moves import configparser | ||
4673 | 28 | if six.PY3: | ||
4674 | 29 | from urllib import parse as urlparse | ||
4675 | 30 | else: | ||
4676 | 31 | import urlparse | ||
4677 | 25 | 32 | ||
4678 | 26 | 33 | ||
4679 | 27 | class AmuletUtils(object): | 34 | class AmuletUtils(object): |
4680 | @@ -33,6 +40,7 @@ | |||
4681 | 33 | 40 | ||
4682 | 34 | def __init__(self, log_level=logging.ERROR): | 41 | def __init__(self, log_level=logging.ERROR): |
4683 | 35 | self.log = self.get_logger(level=log_level) | 42 | self.log = self.get_logger(level=log_level) |
4684 | 43 | self.ubuntu_releases = self.get_ubuntu_releases() | ||
4685 | 36 | 44 | ||
4686 | 37 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): | 45 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): |
4687 | 38 | """Get a logger object that will log to stdout.""" | 46 | """Get a logger object that will log to stdout.""" |
4688 | @@ -70,12 +78,44 @@ | |||
4689 | 70 | else: | 78 | else: |
4690 | 71 | return False | 79 | return False |
4691 | 72 | 80 | ||
4692 | 81 | def get_ubuntu_release_from_sentry(self, sentry_unit): | ||
4693 | 82 | """Get Ubuntu release codename from sentry unit. | ||
4694 | 83 | |||
4695 | 84 | :param sentry_unit: amulet sentry/service unit pointer | ||
4696 | 85 | :returns: list of strings - release codename, failure message | ||
4697 | 86 | """ | ||
4698 | 87 | msg = None | ||
4699 | 88 | cmd = 'lsb_release -cs' | ||
4700 | 89 | release, code = sentry_unit.run(cmd) | ||
4701 | 90 | if code == 0: | ||
4702 | 91 | self.log.debug('{} lsb_release: {}'.format( | ||
4703 | 92 | sentry_unit.info['unit_name'], release)) | ||
4704 | 93 | else: | ||
4705 | 94 | msg = ('{} `{}` returned {} ' | ||
4706 | 95 | '{}'.format(sentry_unit.info['unit_name'], | ||
4707 | 96 | cmd, release, code)) | ||
4708 | 97 | if release not in self.ubuntu_releases: | ||
4709 | 98 | msg = ("Release ({}) not found in Ubuntu releases " | ||
4710 | 99 | "({})".format(release, self.ubuntu_releases)) | ||
4711 | 100 | return release, msg | ||
4712 | 101 | |||
4713 | 73 | def validate_services(self, commands): | 102 | def validate_services(self, commands): |
4717 | 74 | """Validate services. | 103 | """Validate that lists of commands succeed on service units. Can be |
4718 | 75 | 104 | used to verify system services are running on the corresponding | |
4716 | 76 | Verify the specified services are running on the corresponding | ||
4719 | 77 | service units. | 105 | service units. |
4721 | 78 | """ | 106 | |
4722 | 107 | :param commands: dict with sentry keys and arbitrary command list vals | ||
4723 | 108 | :returns: None if successful, Failure string message otherwise | ||
4724 | 109 | """ | ||
4725 | 110 | self.log.debug('Checking status of system services...') | ||
4726 | 111 | |||
4727 | 112 | # /!\ DEPRECATION WARNING (beisner): | ||
4728 | 113 | # New and existing tests should be rewritten to use | ||
4729 | 114 | # validate_services_by_name() as it is aware of init systems. | ||
4730 | 115 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
4731 | 116 | 'validate_services_by_name instead of validate_services ' | ||
4732 | 117 | 'due to init system differences.') | ||
4733 | 118 | |||
4734 | 79 | for k, v in six.iteritems(commands): | 119 | for k, v in six.iteritems(commands): |
4735 | 80 | for cmd in v: | 120 | for cmd in v: |
4736 | 81 | output, code = k.run(cmd) | 121 | output, code = k.run(cmd) |
4737 | @@ -86,6 +126,45 @@ | |||
4738 | 86 | return "command `{}` returned {}".format(cmd, str(code)) | 126 | return "command `{}` returned {}".format(cmd, str(code)) |
4739 | 87 | return None | 127 | return None |
4740 | 88 | 128 | ||
    def validate_services_by_name(self, sentry_services):
        """Validate system service status by service name, automatically
        detecting init system based on Ubuntu release codename.

        :param sentry_services: dict with sentry keys and svc list values
        :returns: None if successful, Failure string message otherwise
        """
        self.log.debug('Checking status of system services...')

        # Point at which systemd became a thing
        systemd_switch = self.ubuntu_releases.index('vivid')

        for sentry_unit, services_list in six.iteritems(sentry_services):
            # Get lsb_release codename from unit
            release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
            if ret:
                return ret

            for service_name in services_list:
                # rabbitmq-server and apache2 are special-cased into the
                # `service` branch even on pre-vivid (upstart) releases,
                # presumably because they ship sysv scripts — TODO confirm.
                if (self.ubuntu_releases.index(release) >= systemd_switch or
                        service_name in ['rabbitmq-server', 'apache2']):
                    # init is systemd (or regular sysv)
                    cmd = 'sudo service {} status'.format(service_name)
                    output, code = sentry_unit.run(cmd)
                    service_running = code == 0
                elif self.ubuntu_releases.index(release) < systemd_switch:
                    # init is upstart; "start/running" in the output is the
                    # only reliable success signal for `status`.
                    cmd = 'sudo status {}'.format(service_name)
                    output, code = sentry_unit.run(cmd)
                    service_running = code == 0 and "start/running" in output

                self.log.debug('{} `{}` returned '
                               '{}'.format(sentry_unit.info['unit_name'],
                                           cmd, code))
                if not service_running:
                    return u"command `{}` returned {} {}".format(
                        cmd, output, str(code))
        return None
4779 | 167 | |||
4780 | 89 | def _get_config(self, unit, filename): | 168 | def _get_config(self, unit, filename): |
4781 | 90 | """Get a ConfigParser object for parsing a unit's config file.""" | 169 | """Get a ConfigParser object for parsing a unit's config file.""" |
4782 | 91 | file_contents = unit.file_contents(filename) | 170 | file_contents = unit.file_contents(filename) |
4783 | @@ -93,7 +172,7 @@ | |||
4784 | 93 | # NOTE(beisner): by default, ConfigParser does not handle options | 172 | # NOTE(beisner): by default, ConfigParser does not handle options |
4785 | 94 | # with no value, such as the flags used in the mysql my.cnf file. | 173 | # with no value, such as the flags used in the mysql my.cnf file. |
4786 | 95 | # https://bugs.python.org/issue7005 | 174 | # https://bugs.python.org/issue7005 |
4788 | 96 | config = ConfigParser.ConfigParser(allow_no_value=True) | 175 | config = configparser.ConfigParser(allow_no_value=True) |
4789 | 97 | config.readfp(io.StringIO(file_contents)) | 176 | config.readfp(io.StringIO(file_contents)) |
4790 | 98 | return config | 177 | return config |
4791 | 99 | 178 | ||
4792 | @@ -103,7 +182,15 @@ | |||
4793 | 103 | 182 | ||
4794 | 104 | Verify that the specified section of the config file contains | 183 | Verify that the specified section of the config file contains |
4795 | 105 | the expected option key:value pairs. | 184 | the expected option key:value pairs. |
4796 | 185 | |||
4797 | 186 | Compare expected dictionary data vs actual dictionary data. | ||
4798 | 187 | The values in the 'expected' dictionary can be strings, bools, ints, | ||
4799 | 188 | longs, or can be a function that evaluates a variable and returns a | ||
4800 | 189 | bool. | ||
4801 | 106 | """ | 190 | """ |
4802 | 191 | self.log.debug('Validating config file data ({} in {} on {})' | ||
4803 | 192 | '...'.format(section, config_file, | ||
4804 | 193 | sentry_unit.info['unit_name'])) | ||
4805 | 107 | config = self._get_config(sentry_unit, config_file) | 194 | config = self._get_config(sentry_unit, config_file) |
4806 | 108 | 195 | ||
4807 | 109 | if section != 'DEFAULT' and not config.has_section(section): | 196 | if section != 'DEFAULT' and not config.has_section(section): |
4808 | @@ -112,9 +199,20 @@ | |||
4809 | 112 | for k in expected.keys(): | 199 | for k in expected.keys(): |
4810 | 113 | if not config.has_option(section, k): | 200 | if not config.has_option(section, k): |
4811 | 114 | return "section [{}] is missing option {}".format(section, k) | 201 | return "section [{}] is missing option {}".format(section, k) |
4813 | 115 | if config.get(section, k) != expected[k]: | 202 | |
4814 | 203 | actual = config.get(section, k) | ||
4815 | 204 | v = expected[k] | ||
4816 | 205 | if (isinstance(v, six.string_types) or | ||
4817 | 206 | isinstance(v, bool) or | ||
4818 | 207 | isinstance(v, six.integer_types)): | ||
4819 | 208 | # handle explicit values | ||
4820 | 209 | if actual != v: | ||
4821 | 210 | return "section [{}] {}:{} != expected {}:{}".format( | ||
4822 | 211 | section, k, actual, k, expected[k]) | ||
4823 | 212 | # handle function pointers, such as not_null or valid_ip | ||
4824 | 213 | elif not v(actual): | ||
4825 | 116 | return "section [{}] {}:{} != expected {}:{}".format( | 214 | return "section [{}] {}:{} != expected {}:{}".format( |
4827 | 117 | section, k, config.get(section, k), k, expected[k]) | 215 | section, k, actual, k, expected[k]) |
4828 | 118 | return None | 216 | return None |
4829 | 119 | 217 | ||
4830 | 120 | def _validate_dict_data(self, expected, actual): | 218 | def _validate_dict_data(self, expected, actual): |
4831 | @@ -122,7 +220,7 @@ | |||
4832 | 122 | 220 | ||
4833 | 123 | Compare expected dictionary data vs actual dictionary data. | 221 | Compare expected dictionary data vs actual dictionary data. |
4834 | 124 | The values in the 'expected' dictionary can be strings, bools, ints, | 222 | The values in the 'expected' dictionary can be strings, bools, ints, |
4836 | 125 | longs, or can be a function that evaluate a variable and returns a | 223 | longs, or can be a function that evaluates a variable and returns a |
4837 | 126 | bool. | 224 | bool. |
4838 | 127 | """ | 225 | """ |
4839 | 128 | self.log.debug('actual: {}'.format(repr(actual))) | 226 | self.log.debug('actual: {}'.format(repr(actual))) |
4840 | @@ -133,8 +231,10 @@ | |||
4841 | 133 | if (isinstance(v, six.string_types) or | 231 | if (isinstance(v, six.string_types) or |
4842 | 134 | isinstance(v, bool) or | 232 | isinstance(v, bool) or |
4843 | 135 | isinstance(v, six.integer_types)): | 233 | isinstance(v, six.integer_types)): |
4844 | 234 | # handle explicit values | ||
4845 | 136 | if v != actual[k]: | 235 | if v != actual[k]: |
4846 | 137 | return "{}:{}".format(k, actual[k]) | 236 | return "{}:{}".format(k, actual[k]) |
4847 | 237 | # handle function pointers, such as not_null or valid_ip | ||
4848 | 138 | elif not v(actual[k]): | 238 | elif not v(actual[k]): |
4849 | 139 | return "{}:{}".format(k, actual[k]) | 239 | return "{}:{}".format(k, actual[k]) |
4850 | 140 | else: | 240 | else: |
4851 | @@ -321,3 +421,133 @@ | |||
4852 | 321 | 421 | ||
4853 | 322 | def endpoint_error(self, name, data): | 422 | def endpoint_error(self, name, data): |
4854 | 323 | return 'unexpected endpoint data in {} - {}'.format(name, data) | 423 | return 'unexpected endpoint data in {} - {}'.format(name, data) |
4855 | 424 | |||
4856 | 425 | def get_ubuntu_releases(self): | ||
4857 | 426 | """Return a list of all Ubuntu releases in order of release.""" | ||
4858 | 427 | _d = distro_info.UbuntuDistroInfo() | ||
4859 | 428 | _release_list = _d.all | ||
4860 | 429 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
4861 | 430 | return _release_list | ||
4862 | 431 | |||
4863 | 432 | def file_to_url(self, file_rel_path): | ||
4864 | 433 | """Convert a relative file path to a file URL.""" | ||
4865 | 434 | _abs_path = os.path.abspath(file_rel_path) | ||
4866 | 435 | return urlparse.urlparse(_abs_path, scheme='file').geturl() | ||
4867 | 436 | |||
4868 | 437 | def check_commands_on_units(self, commands, sentry_units): | ||
4869 | 438 | """Check that all commands in a list exit zero on all | ||
4870 | 439 | sentry units in a list. | ||
4871 | 440 | |||
4872 | 441 | :param commands: list of bash commands | ||
4873 | 442 | :param sentry_units: list of sentry unit pointers | ||
4874 | 443 | :returns: None if successful; Failure message otherwise | ||
4875 | 444 | """ | ||
4876 | 445 | self.log.debug('Checking exit codes for {} commands on {} ' | ||
4877 | 446 | 'sentry units...'.format(len(commands), | ||
4878 | 447 | len(sentry_units))) | ||
4879 | 448 | for sentry_unit in sentry_units: | ||
4880 | 449 | for cmd in commands: | ||
4881 | 450 | output, code = sentry_unit.run(cmd) | ||
4882 | 451 | if code == 0: | ||
4883 | 452 | self.log.debug('{} `{}` returned {} ' | ||
4884 | 453 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
4885 | 454 | cmd, code)) | ||
4886 | 455 | else: | ||
4887 | 456 | return ('{} `{}` returned {} ' | ||
4888 | 457 | '{}'.format(sentry_unit.info['unit_name'], | ||
4889 | 458 | cmd, code, output)) | ||
4890 | 459 | return None | ||
4891 | 460 | |||
4892 | 461 | def get_process_id_list(self, sentry_unit, process_name): | ||
4893 | 462 | """Get a list of process ID(s) from a single sentry juju unit | ||
4894 | 463 | for a single process name. | ||
4895 | 464 | |||
4896 | 465 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
4897 | 466 | :param process_name: Process name | ||
4898 | 467 | :returns: List of process IDs | ||
4899 | 468 | """ | ||
4900 | 469 | cmd = 'pidof {}'.format(process_name) | ||
4901 | 470 | output, code = sentry_unit.run(cmd) | ||
4902 | 471 | if code != 0: | ||
4903 | 472 | msg = ('{} `{}` returned {} ' | ||
4904 | 473 | '{}'.format(sentry_unit.info['unit_name'], | ||
4905 | 474 | cmd, code, output)) | ||
4906 | 475 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
4907 | 476 | return str(output).split() | ||
4908 | 477 | |||
4909 | 478 | def get_unit_process_ids(self, unit_processes): | ||
4910 | 479 | """Construct a dict containing unit sentries, process names, and | ||
4911 | 480 | process IDs.""" | ||
4912 | 481 | pid_dict = {} | ||
4913 | 482 | for sentry_unit, process_list in unit_processes.iteritems(): | ||
4914 | 483 | pid_dict[sentry_unit] = {} | ||
4915 | 484 | for process in process_list: | ||
4916 | 485 | pids = self.get_process_id_list(sentry_unit, process) | ||
4917 | 486 | pid_dict[sentry_unit].update({process: pids}) | ||
4918 | 487 | return pid_dict | ||
4919 | 488 | |||
4920 | 489 | def validate_unit_process_ids(self, expected, actual): | ||
4921 | 490 | """Validate process id quantities for services on units.""" | ||
4922 | 491 | self.log.debug('Checking units for running processes...') | ||
4923 | 492 | self.log.debug('Expected PIDs: {}'.format(expected)) | ||
4924 | 493 | self.log.debug('Actual PIDs: {}'.format(actual)) | ||
4925 | 494 | |||
4926 | 495 | if len(actual) != len(expected): | ||
4927 | 496 | return ('Unit count mismatch. expected, actual: {}, ' | ||
4928 | 497 | '{} '.format(len(expected), len(actual))) | ||
4929 | 498 | |||
4930 | 499 | for (e_sentry, e_proc_names) in expected.iteritems(): | ||
4931 | 500 | e_sentry_name = e_sentry.info['unit_name'] | ||
4932 | 501 | if e_sentry in actual.keys(): | ||
4933 | 502 | a_proc_names = actual[e_sentry] | ||
4934 | 503 | else: | ||
4935 | 504 | return ('Expected sentry ({}) not found in actual dict data.' | ||
4936 | 505 | '{}'.format(e_sentry_name, e_sentry)) | ||
4937 | 506 | |||
4938 | 507 | if len(e_proc_names.keys()) != len(a_proc_names.keys()): | ||
4939 | 508 | return ('Process name count mismatch. expected, actual: {}, ' | ||
4940 | 509 | '{}'.format(len(expected), len(actual))) | ||
4941 | 510 | |||
4942 | 511 | for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ | ||
4943 | 512 | zip(e_proc_names.items(), a_proc_names.items()): | ||
4944 | 513 | if e_proc_name != a_proc_name: | ||
4945 | 514 | return ('Process name mismatch. expected, actual: {}, ' | ||
4946 | 515 | '{}'.format(e_proc_name, a_proc_name)) | ||
4947 | 516 | |||
4948 | 517 | a_pids_length = len(a_pids) | ||
4949 | 518 | fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' | ||
4950 | 519 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, | ||
4951 | 520 | e_pids_length, a_pids_length, | ||
4952 | 521 | a_pids)) | ||
4953 | 522 | |||
4954 | 523 | # If expected is not bool, ensure PID quantities match | ||
4955 | 524 | if not isinstance(e_pids_length, bool) and \ | ||
4956 | 525 | a_pids_length != e_pids_length: | ||
4957 | 526 | return fail_msg | ||
4958 | 527 | # If expected is bool True, ensure 1 or more PIDs exist | ||
4959 | 528 | elif isinstance(e_pids_length, bool) and \ | ||
4960 | 529 | e_pids_length is True and a_pids_length < 1: | ||
4961 | 530 | return fail_msg | ||
4962 | 531 | # If expected is bool False, ensure 0 PIDs exist | ||
4963 | 532 | elif isinstance(e_pids_length, bool) and \ | ||
4964 | 533 | e_pids_length is False and a_pids_length != 0: | ||
4965 | 534 | return fail_msg | ||
4966 | 535 | else: | ||
4967 | 536 | self.log.debug('PID check OK: {} {} {}: ' | ||
4968 | 537 | '{}'.format(e_sentry_name, e_proc_name, | ||
4969 | 538 | e_pids_length, a_pids)) | ||
4970 | 539 | return None | ||
4971 | 540 | |||
4972 | 541 | def validate_list_of_identical_dicts(self, list_of_dicts): | ||
4973 | 542 | """Check that all dicts within a list are identical.""" | ||
4974 | 543 | hashes = [] | ||
4975 | 544 | for _dict in list_of_dicts: | ||
4976 | 545 | hashes.append(hash(frozenset(_dict.items()))) | ||
4977 | 546 | |||
4978 | 547 | self.log.debug('Hashes: {}'.format(hashes)) | ||
4979 | 548 | if len(set(hashes)) == 1: | ||
4980 | 549 | self.log.debug('Dicts within list are identical') | ||
4981 | 550 | else: | ||
4982 | 551 | return 'Dicts within list are not identical' | ||
4983 | 552 | |||
4984 | 553 | return None | ||
4985 | 324 | 554 | ||
4986 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
4987 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 08:25:28 +0000 | |||
4988 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000 | |||
4989 | @@ -44,7 +44,7 @@ | |||
4990 | 44 | Determine if the local branch being tested is derived from its | 44 | Determine if the local branch being tested is derived from its |
4991 | 45 | stable or next (dev) branch, and based on this, use the corresponding | 45 | stable or next (dev) branch, and based on this, use the corresponding
4992 | 46 | stable or next branches for the other_services.""" | 46 | stable or next branches for the other_services.""" |
4994 | 47 | base_charms = ['mysql', 'mongodb'] | 47 | base_charms = ['mysql', 'mongodb', 'nrpe'] |
4995 | 48 | 48 | ||
4996 | 49 | if self.series in ['precise', 'trusty']: | 49 | if self.series in ['precise', 'trusty']: |
4997 | 50 | base_series = self.series | 50 | base_series = self.series |
4998 | @@ -83,9 +83,10 @@ | |||
4999 | 83 | services.append(this_service) | 83 | services.append(this_service) |
5000 | 84 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 84 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
The diff has been truncated for viewing.
Just a few niggles - but need taking care of.