Merge lp:~thumper/charms/trusty/python-django/clean-contrib into lp:charms/python-django
- Trusty Tahr (14.04)
- clean-contrib
- Merge into trunk
Proposed by
Tim Penhey
Status: | Merged |
---|---|
Merged at revision: | 39 |
Proposed branch: | lp:~thumper/charms/trusty/python-django/clean-contrib |
Merge into: | lp:charms/python-django |
Diff against target: |
5075 lines (+0/-4900) 29 files modified
charm-helpers.yaml (+0/-1) hooks/charmhelpers/contrib/ansible/__init__.py (+0/-165) hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-184) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-216) hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156) hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-59) hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-183) hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602) hooks/charmhelpers/contrib/network/ip.py (+0/-69) hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-75) hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-17) hooks/charmhelpers/contrib/openstack/context.py (+0/-700) hooks/charmhelpers/contrib/openstack/neutron.py (+0/-171) hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-2) hooks/charmhelpers/contrib/openstack/templating.py (+0/-280) hooks/charmhelpers/contrib/openstack/utils.py (+0/-450) hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-83) hooks/charmhelpers/contrib/python/packages.py (+0/-76) hooks/charmhelpers/contrib/python/version.py (+0/-18) hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-102) hooks/charmhelpers/contrib/ssl/__init__.py (+0/-78) hooks/charmhelpers/contrib/ssl/service.py (+0/-267) hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-387) hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-62) hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-88) hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-35) hooks/charmhelpers/contrib/templating/contexts.py (+0/-104) hooks/charmhelpers/contrib/templating/pyformat.py (+0/-13) hooks/charmhelpers/contrib/unison/__init__.py (+0/-257) |
To merge this branch: | bzr merge lp:~thumper/charms/trusty/python-django/clean-contrib |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Tim Van Steenburgh (community) | Approve | ||
Review via email: mp+260388@code.launchpad.net |
Commit message
Description of the change
This branch just removes the charmhelpers/contrib directory (and drops "contrib" from charm-helpers.yaml).
It isn't used anywhere.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers.yaml' | |||
2 | --- charm-helpers.yaml 2013-11-26 17:12:54 +0000 | |||
3 | +++ charm-helpers.yaml 2015-05-27 22:03:53 +0000 | |||
4 | @@ -3,4 +3,3 @@ | |||
5 | 3 | include: | 3 | include: |
6 | 4 | - core | 4 | - core |
7 | 5 | - fetch | 5 | - fetch |
8 | 6 | - contrib | ||
9 | 7 | 6 | ||
10 | === removed directory 'hooks/charmhelpers/contrib' | |||
11 | === removed file 'hooks/charmhelpers/contrib/__init__.py' | |||
12 | === removed directory 'hooks/charmhelpers/contrib/ansible' | |||
13 | === removed file 'hooks/charmhelpers/contrib/ansible/__init__.py' | |||
14 | --- hooks/charmhelpers/contrib/ansible/__init__.py 2013-11-26 17:12:54 +0000 | |||
15 | +++ hooks/charmhelpers/contrib/ansible/__init__.py 1970-01-01 00:00:00 +0000 | |||
16 | @@ -1,165 +0,0 @@ | |||
17 | 1 | # Copyright 2013 Canonical Ltd. | ||
18 | 2 | # | ||
19 | 3 | # Authors: | ||
20 | 4 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
21 | 5 | """Charm Helpers ansible - declare the state of your machines. | ||
22 | 6 | |||
23 | 7 | This helper enables you to declare your machine state, rather than | ||
24 | 8 | program it procedurally (and have to test each change to your procedures). | ||
25 | 9 | Your install hook can be as simple as: | ||
26 | 10 | |||
27 | 11 | {{{ | ||
28 | 12 | import charmhelpers.contrib.ansible | ||
29 | 13 | |||
30 | 14 | |||
31 | 15 | def install(): | ||
32 | 16 | charmhelpers.contrib.ansible.install_ansible_support() | ||
33 | 17 | charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml') | ||
34 | 18 | }}} | ||
35 | 19 | |||
36 | 20 | and won't need to change (nor will its tests) when you change the machine | ||
37 | 21 | state. | ||
38 | 22 | |||
39 | 23 | All of your juju config and relation-data are available as template | ||
40 | 24 | variables within your playbooks and templates. An install playbook looks | ||
41 | 25 | something like: | ||
42 | 26 | |||
43 | 27 | {{{ | ||
44 | 28 | --- | ||
45 | 29 | - hosts: localhost | ||
46 | 30 | user: root | ||
47 | 31 | |||
48 | 32 | tasks: | ||
49 | 33 | - name: Add private repositories. | ||
50 | 34 | template: | ||
51 | 35 | src: ../templates/private-repositories.list.jinja2 | ||
52 | 36 | dest: /etc/apt/sources.list.d/private.list | ||
53 | 37 | |||
54 | 38 | - name: Update the cache. | ||
55 | 39 | apt: update_cache=yes | ||
56 | 40 | |||
57 | 41 | - name: Install dependencies. | ||
58 | 42 | apt: pkg={{ item }} | ||
59 | 43 | with_items: | ||
60 | 44 | - python-mimeparse | ||
61 | 45 | - python-webob | ||
62 | 46 | - sunburnt | ||
63 | 47 | |||
64 | 48 | - name: Setup groups. | ||
65 | 49 | group: name={{ item.name }} gid={{ item.gid }} | ||
66 | 50 | with_items: | ||
67 | 51 | - { name: 'deploy_user', gid: 1800 } | ||
68 | 52 | - { name: 'service_user', gid: 1500 } | ||
69 | 53 | |||
70 | 54 | ... | ||
71 | 55 | }}} | ||
72 | 56 | |||
73 | 57 | Read more online about playbooks[1] and standard ansible modules[2]. | ||
74 | 58 | |||
75 | 59 | [1] http://www.ansibleworks.com/docs/playbooks.html | ||
76 | 60 | [2] http://www.ansibleworks.com/docs/modules.html | ||
77 | 61 | """ | ||
78 | 62 | import os | ||
79 | 63 | import subprocess | ||
80 | 64 | |||
81 | 65 | import charmhelpers.contrib.templating.contexts | ||
82 | 66 | import charmhelpers.core.host | ||
83 | 67 | import charmhelpers.core.hookenv | ||
84 | 68 | import charmhelpers.fetch | ||
85 | 69 | |||
86 | 70 | |||
87 | 71 | charm_dir = os.environ.get('CHARM_DIR', '') | ||
88 | 72 | ansible_hosts_path = '/etc/ansible/hosts' | ||
89 | 73 | # Ansible will automatically include any vars in the following | ||
90 | 74 | # file in its inventory when run locally. | ||
91 | 75 | ansible_vars_path = '/etc/ansible/host_vars/localhost' | ||
92 | 76 | |||
93 | 77 | |||
94 | 78 | def install_ansible_support(from_ppa=True): | ||
95 | 79 | """Installs the ansible package. | ||
96 | 80 | |||
97 | 81 | By default it is installed from the PPA [1] linked from | ||
98 | 82 | the ansible website [2]. | ||
99 | 83 | |||
100 | 84 | [1] https://launchpad.net/~rquillo/+archive/ansible | ||
101 | 85 | [2] http://www.ansibleworks.com/docs/gettingstarted.html#ubuntu-and-debian | ||
102 | 86 | |||
103 | 87 | If from_ppa is false, you must ensure that the package is available | ||
104 | 88 | from a configured repository. | ||
105 | 89 | """ | ||
106 | 90 | if from_ppa: | ||
107 | 91 | charmhelpers.fetch.add_source('ppa:rquillo/ansible') | ||
108 | 92 | charmhelpers.fetch.apt_update(fatal=True) | ||
109 | 93 | charmhelpers.fetch.apt_install('ansible') | ||
110 | 94 | with open(ansible_hosts_path, 'w+') as hosts_file: | ||
111 | 95 | hosts_file.write('localhost ansible_connection=local') | ||
112 | 96 | |||
113 | 97 | |||
114 | 98 | def apply_playbook(playbook, tags=None): | ||
115 | 99 | tags = tags or [] | ||
116 | 100 | tags = ",".join(tags) | ||
117 | 101 | charmhelpers.contrib.templating.contexts.juju_state_to_yaml( | ||
118 | 102 | ansible_vars_path, namespace_separator='__', | ||
119 | 103 | allow_hyphens_in_keys=False) | ||
120 | 104 | call = [ | ||
121 | 105 | 'ansible-playbook', | ||
122 | 106 | '-c', | ||
123 | 107 | 'local', | ||
124 | 108 | playbook, | ||
125 | 109 | ] | ||
126 | 110 | if tags: | ||
127 | 111 | call.extend(['--tags', '{}'.format(tags)]) | ||
128 | 112 | subprocess.check_call(call) | ||
129 | 113 | |||
130 | 114 | |||
131 | 115 | class AnsibleHooks(charmhelpers.core.hookenv.Hooks): | ||
132 | 116 | """Run a playbook with the hook-name as the tag. | ||
133 | 117 | |||
134 | 118 | This helper builds on the standard hookenv.Hooks helper, | ||
135 | 119 | but additionally runs the playbook with the hook-name specified | ||
136 | 120 | using --tags (ie. running all the tasks tagged with the hook-name). | ||
137 | 121 | |||
138 | 122 | Example: | ||
139 | 123 | hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml') | ||
140 | 124 | |||
141 | 125 | # All the tasks within my_machine_state.yaml tagged with 'install' | ||
142 | 126 | # will be run automatically after do_custom_work() | ||
143 | 127 | @hooks.hook() | ||
144 | 128 | def install(): | ||
145 | 129 | do_custom_work() | ||
146 | 130 | |||
147 | 131 | # For most of your hooks, you won't need to do anything other | ||
148 | 132 | # than run the tagged tasks for the hook: | ||
149 | 133 | @hooks.hook('config-changed', 'start', 'stop') | ||
150 | 134 | def just_use_playbook(): | ||
151 | 135 | pass | ||
152 | 136 | |||
153 | 137 | # As a convenience, you can avoid the above noop function by specifying | ||
154 | 138 | # the hooks which are handled by ansible-only and they'll be registered | ||
155 | 139 | # for you: | ||
156 | 140 | # hooks = AnsibleHooks( | ||
157 | 141 | # 'playbooks/my_machine_state.yaml', | ||
158 | 142 | # default_hooks=['config-changed', 'start', 'stop']) | ||
159 | 143 | |||
160 | 144 | if __name__ == "__main__": | ||
161 | 145 | # execute a hook based on the name the program is called by | ||
162 | 146 | hooks.execute(sys.argv) | ||
163 | 147 | """ | ||
164 | 148 | |||
165 | 149 | def __init__(self, playbook_path, default_hooks=None): | ||
166 | 150 | """Register any hooks handled by ansible.""" | ||
167 | 151 | super(AnsibleHooks, self).__init__() | ||
168 | 152 | |||
169 | 153 | self.playbook_path = playbook_path | ||
170 | 154 | |||
171 | 155 | default_hooks = default_hooks or [] | ||
172 | 156 | noop = lambda *args, **kwargs: None | ||
173 | 157 | for hook in default_hooks: | ||
174 | 158 | self.register(hook, noop) | ||
175 | 159 | |||
176 | 160 | def execute(self, args): | ||
177 | 161 | """Execute the hook followed by the playbook using the hook as tag.""" | ||
178 | 162 | super(AnsibleHooks, self).execute(args) | ||
179 | 163 | hook_name = os.path.basename(args[0]) | ||
180 | 164 | charmhelpers.contrib.ansible.apply_playbook( | ||
181 | 165 | self.playbook_path, tags=[hook_name]) | ||
182 | 166 | 0 | ||
183 | === removed directory 'hooks/charmhelpers/contrib/charmhelpers' | |||
184 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py' | |||
185 | --- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-11-26 17:12:54 +0000 | |||
186 | +++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 | |||
187 | @@ -1,184 +0,0 @@ | |||
188 | 1 | # Copyright 2012 Canonical Ltd. This software is licensed under the | ||
189 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
190 | 3 | |||
191 | 4 | import warnings | ||
192 | 5 | warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) | ||
193 | 6 | |||
194 | 7 | """Helper functions for writing Juju charms in Python.""" | ||
195 | 8 | |||
196 | 9 | __metaclass__ = type | ||
197 | 10 | __all__ = [ | ||
198 | 11 | #'get_config', # core.hookenv.config() | ||
199 | 12 | #'log', # core.hookenv.log() | ||
200 | 13 | #'log_entry', # core.hookenv.log() | ||
201 | 14 | #'log_exit', # core.hookenv.log() | ||
202 | 15 | #'relation_get', # core.hookenv.relation_get() | ||
203 | 16 | #'relation_set', # core.hookenv.relation_set() | ||
204 | 17 | #'relation_ids', # core.hookenv.relation_ids() | ||
205 | 18 | #'relation_list', # core.hookenv.relation_units() | ||
206 | 19 | #'config_get', # core.hookenv.config() | ||
207 | 20 | #'unit_get', # core.hookenv.unit_get() | ||
208 | 21 | #'open_port', # core.hookenv.open_port() | ||
209 | 22 | #'close_port', # core.hookenv.close_port() | ||
210 | 23 | #'service_control', # core.host.service() | ||
211 | 24 | 'unit_info', # client-side, NOT IMPLEMENTED | ||
212 | 25 | 'wait_for_machine', # client-side, NOT IMPLEMENTED | ||
213 | 26 | 'wait_for_page_contents', # client-side, NOT IMPLEMENTED | ||
214 | 27 | 'wait_for_relation', # client-side, NOT IMPLEMENTED | ||
215 | 28 | 'wait_for_unit', # client-side, NOT IMPLEMENTED | ||
216 | 29 | ] | ||
217 | 30 | |||
218 | 31 | import operator | ||
219 | 32 | from shelltoolbox import ( | ||
220 | 33 | command, | ||
221 | 34 | ) | ||
222 | 35 | import tempfile | ||
223 | 36 | import time | ||
224 | 37 | import urllib2 | ||
225 | 38 | import yaml | ||
226 | 39 | |||
227 | 40 | SLEEP_AMOUNT = 0.1 | ||
228 | 41 | # We create a juju_status Command here because it makes testing much, | ||
229 | 42 | # much easier. | ||
230 | 43 | juju_status = lambda: command('juju')('status') | ||
231 | 44 | |||
232 | 45 | # re-implemented as charmhelpers.fetch.configure_sources() | ||
233 | 46 | #def configure_source(update=False): | ||
234 | 47 | # source = config_get('source') | ||
235 | 48 | # if ((source.startswith('ppa:') or | ||
236 | 49 | # source.startswith('cloud:') or | ||
237 | 50 | # source.startswith('http:'))): | ||
238 | 51 | # run('add-apt-repository', source) | ||
239 | 52 | # if source.startswith("http:"): | ||
240 | 53 | # run('apt-key', 'import', config_get('key')) | ||
241 | 54 | # if update: | ||
242 | 55 | # run('apt-get', 'update') | ||
243 | 56 | |||
244 | 57 | |||
245 | 58 | # DEPRECATED: client-side only | ||
246 | 59 | def make_charm_config_file(charm_config): | ||
247 | 60 | charm_config_file = tempfile.NamedTemporaryFile() | ||
248 | 61 | charm_config_file.write(yaml.dump(charm_config)) | ||
249 | 62 | charm_config_file.flush() | ||
250 | 63 | # The NamedTemporaryFile instance is returned instead of just the name | ||
251 | 64 | # because we want to take advantage of garbage collection-triggered | ||
252 | 65 | # deletion of the temp file when it goes out of scope in the caller. | ||
253 | 66 | return charm_config_file | ||
254 | 67 | |||
255 | 68 | |||
256 | 69 | # DEPRECATED: client-side only | ||
257 | 70 | def unit_info(service_name, item_name, data=None, unit=None): | ||
258 | 71 | if data is None: | ||
259 | 72 | data = yaml.safe_load(juju_status()) | ||
260 | 73 | service = data['services'].get(service_name) | ||
261 | 74 | if service is None: | ||
262 | 75 | # XXX 2012-02-08 gmb: | ||
263 | 76 | # This allows us to cope with the race condition that we | ||
264 | 77 | # have between deploying a service and having it come up in | ||
265 | 78 | # `juju status`. We could probably do with cleaning it up so | ||
266 | 79 | # that it fails a bit more noisily after a while. | ||
267 | 80 | return '' | ||
268 | 81 | units = service['units'] | ||
269 | 82 | if unit is not None: | ||
270 | 83 | item = units[unit][item_name] | ||
271 | 84 | else: | ||
272 | 85 | # It might seem odd to sort the units here, but we do it to | ||
273 | 86 | # ensure that when no unit is specified, the first unit for the | ||
274 | 87 | # service (or at least the one with the lowest number) is the | ||
275 | 88 | # one whose data gets returned. | ||
276 | 89 | sorted_unit_names = sorted(units.keys()) | ||
277 | 90 | item = units[sorted_unit_names[0]][item_name] | ||
278 | 91 | return item | ||
279 | 92 | |||
280 | 93 | |||
281 | 94 | # DEPRECATED: client-side only | ||
282 | 95 | def get_machine_data(): | ||
283 | 96 | return yaml.safe_load(juju_status())['machines'] | ||
284 | 97 | |||
285 | 98 | |||
286 | 99 | # DEPRECATED: client-side only | ||
287 | 100 | def wait_for_machine(num_machines=1, timeout=300): | ||
288 | 101 | """Wait `timeout` seconds for `num_machines` machines to come up. | ||
289 | 102 | |||
290 | 103 | This wait_for... function can be called by other wait_for functions | ||
291 | 104 | whose timeouts might be too short in situations where only a bare | ||
292 | 105 | Juju setup has been bootstrapped. | ||
293 | 106 | |||
294 | 107 | :return: A tuple of (num_machines, time_taken). This is used for | ||
295 | 108 | testing. | ||
296 | 109 | """ | ||
297 | 110 | # You may think this is a hack, and you'd be right. The easiest way | ||
298 | 111 | # to tell what environment we're working in (LXC vs EC2) is to check | ||
299 | 112 | # the dns-name of the first machine. If it's localhost we're in LXC | ||
300 | 113 | # and we can just return here. | ||
301 | 114 | if get_machine_data()[0]['dns-name'] == 'localhost': | ||
302 | 115 | return 1, 0 | ||
303 | 116 | start_time = time.time() | ||
304 | 117 | while True: | ||
305 | 118 | # Drop the first machine, since it's the Zookeeper and that's | ||
306 | 119 | # not a machine that we need to wait for. This will only work | ||
307 | 120 | # for EC2 environments, which is why we return early above if | ||
308 | 121 | # we're in LXC. | ||
309 | 122 | machine_data = get_machine_data() | ||
310 | 123 | non_zookeeper_machines = [ | ||
311 | 124 | machine_data[key] for key in machine_data.keys()[1:]] | ||
312 | 125 | if len(non_zookeeper_machines) >= num_machines: | ||
313 | 126 | all_machines_running = True | ||
314 | 127 | for machine in non_zookeeper_machines: | ||
315 | 128 | if machine.get('instance-state') != 'running': | ||
316 | 129 | all_machines_running = False | ||
317 | 130 | break | ||
318 | 131 | if all_machines_running: | ||
319 | 132 | break | ||
320 | 133 | if time.time() - start_time >= timeout: | ||
321 | 134 | raise RuntimeError('timeout waiting for service to start') | ||
322 | 135 | time.sleep(SLEEP_AMOUNT) | ||
323 | 136 | return num_machines, time.time() - start_time | ||
324 | 137 | |||
325 | 138 | |||
326 | 139 | # DEPRECATED: client-side only | ||
327 | 140 | def wait_for_unit(service_name, timeout=480): | ||
328 | 141 | """Wait `timeout` seconds for a given service name to come up.""" | ||
329 | 142 | wait_for_machine(num_machines=1) | ||
330 | 143 | start_time = time.time() | ||
331 | 144 | while True: | ||
332 | 145 | state = unit_info(service_name, 'agent-state') | ||
333 | 146 | if 'error' in state or state == 'started': | ||
334 | 147 | break | ||
335 | 148 | if time.time() - start_time >= timeout: | ||
336 | 149 | raise RuntimeError('timeout waiting for service to start') | ||
337 | 150 | time.sleep(SLEEP_AMOUNT) | ||
338 | 151 | if state != 'started': | ||
339 | 152 | raise RuntimeError('unit did not start, agent-state: ' + state) | ||
340 | 153 | |||
341 | 154 | |||
342 | 155 | # DEPRECATED: client-side only | ||
343 | 156 | def wait_for_relation(service_name, relation_name, timeout=120): | ||
344 | 157 | """Wait `timeout` seconds for a given relation to come up.""" | ||
345 | 158 | start_time = time.time() | ||
346 | 159 | while True: | ||
347 | 160 | relation = unit_info(service_name, 'relations').get(relation_name) | ||
348 | 161 | if relation is not None and relation['state'] == 'up': | ||
349 | 162 | break | ||
350 | 163 | if time.time() - start_time >= timeout: | ||
351 | 164 | raise RuntimeError('timeout waiting for relation to be up') | ||
352 | 165 | time.sleep(SLEEP_AMOUNT) | ||
353 | 166 | |||
354 | 167 | |||
355 | 168 | # DEPRECATED: client-side only | ||
356 | 169 | def wait_for_page_contents(url, contents, timeout=120, validate=None): | ||
357 | 170 | if validate is None: | ||
358 | 171 | validate = operator.contains | ||
359 | 172 | start_time = time.time() | ||
360 | 173 | while True: | ||
361 | 174 | try: | ||
362 | 175 | stream = urllib2.urlopen(url) | ||
363 | 176 | except (urllib2.HTTPError, urllib2.URLError): | ||
364 | 177 | pass | ||
365 | 178 | else: | ||
366 | 179 | page = stream.read() | ||
367 | 180 | if validate(page, contents): | ||
368 | 181 | return page | ||
369 | 182 | if time.time() - start_time >= timeout: | ||
370 | 183 | raise RuntimeError('timeout waiting for contents of ' + url) | ||
371 | 184 | time.sleep(SLEEP_AMOUNT) | ||
372 | 185 | 0 | ||
373 | === removed directory 'hooks/charmhelpers/contrib/charmsupport' | |||
374 | === removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py' | |||
375 | === removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
376 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-11-26 17:12:54 +0000 | |||
377 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 | |||
378 | @@ -1,216 +0,0 @@ | |||
379 | 1 | """Compatibility with the nrpe-external-master charm""" | ||
380 | 2 | # Copyright 2012 Canonical Ltd. | ||
381 | 3 | # | ||
382 | 4 | # Authors: | ||
383 | 5 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | ||
384 | 6 | |||
385 | 7 | import subprocess | ||
386 | 8 | import pwd | ||
387 | 9 | import grp | ||
388 | 10 | import os | ||
389 | 11 | import re | ||
390 | 12 | import shlex | ||
391 | 13 | import yaml | ||
392 | 14 | |||
393 | 15 | from charmhelpers.core.hookenv import ( | ||
394 | 16 | config, | ||
395 | 17 | local_unit, | ||
396 | 18 | log, | ||
397 | 19 | relation_ids, | ||
398 | 20 | relation_set, | ||
399 | 21 | ) | ||
400 | 22 | |||
401 | 23 | from charmhelpers.core.host import service | ||
402 | 24 | |||
403 | 25 | # This module adds compatibility with the nrpe-external-master and plain nrpe | ||
404 | 26 | # subordinate charms. To use it in your charm: | ||
405 | 27 | # | ||
406 | 28 | # 1. Update metadata.yaml | ||
407 | 29 | # | ||
408 | 30 | # provides: | ||
409 | 31 | # (...) | ||
410 | 32 | # nrpe-external-master: | ||
411 | 33 | # interface: nrpe-external-master | ||
412 | 34 | # scope: container | ||
413 | 35 | # | ||
414 | 36 | # and/or | ||
415 | 37 | # | ||
416 | 38 | # provides: | ||
417 | 39 | # (...) | ||
418 | 40 | # local-monitors: | ||
419 | 41 | # interface: local-monitors | ||
420 | 42 | # scope: container | ||
421 | 43 | |||
422 | 44 | # | ||
423 | 45 | # 2. Add the following to config.yaml | ||
424 | 46 | # | ||
425 | 47 | # nagios_context: | ||
426 | 48 | # default: "juju" | ||
427 | 49 | # type: string | ||
428 | 50 | # description: | | ||
429 | 51 | # Used by the nrpe subordinate charms. | ||
430 | 52 | # A string that will be prepended to instance name to set the host name | ||
431 | 53 | # in nagios. So for instance the hostname would be something like: | ||
432 | 54 | # juju-myservice-0 | ||
433 | 55 | # If you're running multiple environments with the same services in them | ||
434 | 56 | # this allows you to differentiate between them. | ||
435 | 57 | # | ||
436 | 58 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master | ||
437 | 59 | # | ||
438 | 60 | # 4. Update your hooks.py with something like this: | ||
439 | 61 | # | ||
440 | 62 | # from charmsupport.nrpe import NRPE | ||
441 | 63 | # (...) | ||
442 | 64 | # def update_nrpe_config(): | ||
443 | 65 | # nrpe_compat = NRPE() | ||
444 | 66 | # nrpe_compat.add_check( | ||
445 | 67 | # shortname = "myservice", | ||
446 | 68 | # description = "Check MyService", | ||
447 | 69 | # check_cmd = "check_http -w 2 -c 10 http://localhost" | ||
448 | 70 | # ) | ||
449 | 71 | # nrpe_compat.add_check( | ||
450 | 72 | # "myservice_other", | ||
451 | 73 | # "Check for widget failures", | ||
452 | 74 | # check_cmd = "/srv/myapp/scripts/widget_check" | ||
453 | 75 | # ) | ||
454 | 76 | # nrpe_compat.write() | ||
455 | 77 | # | ||
456 | 78 | # def config_changed(): | ||
457 | 79 | # (...) | ||
458 | 80 | # update_nrpe_config() | ||
459 | 81 | # | ||
460 | 82 | # def nrpe_external_master_relation_changed(): | ||
461 | 83 | # update_nrpe_config() | ||
462 | 84 | # | ||
463 | 85 | # def local_monitors_relation_changed(): | ||
464 | 86 | # update_nrpe_config() | ||
465 | 87 | # | ||
466 | 88 | # 5. ln -s hooks.py nrpe-external-master-relation-changed | ||
467 | 89 | # ln -s hooks.py local-monitors-relation-changed | ||
468 | 90 | |||
469 | 91 | |||
470 | 92 | class CheckException(Exception): | ||
471 | 93 | pass | ||
472 | 94 | |||
473 | 95 | |||
474 | 96 | class Check(object): | ||
475 | 97 | shortname_re = '[A-Za-z0-9-_]+$' | ||
476 | 98 | service_template = (""" | ||
477 | 99 | #--------------------------------------------------- | ||
478 | 100 | # This file is Juju managed | ||
479 | 101 | #--------------------------------------------------- | ||
480 | 102 | define service {{ | ||
481 | 103 | use active-service | ||
482 | 104 | host_name {nagios_hostname} | ||
483 | 105 | service_description {nagios_hostname}[{shortname}] """ | ||
484 | 106 | """{description} | ||
485 | 107 | check_command check_nrpe!{command} | ||
486 | 108 | servicegroups {nagios_servicegroup} | ||
487 | 109 | }} | ||
488 | 110 | """) | ||
489 | 111 | |||
490 | 112 | def __init__(self, shortname, description, check_cmd): | ||
491 | 113 | super(Check, self).__init__() | ||
492 | 114 | # XXX: could be better to calculate this from the service name | ||
493 | 115 | if not re.match(self.shortname_re, shortname): | ||
494 | 116 | raise CheckException("shortname must match {}".format( | ||
495 | 117 | Check.shortname_re)) | ||
496 | 118 | self.shortname = shortname | ||
497 | 119 | self.command = "check_{}".format(shortname) | ||
498 | 120 | # Note: a set of invalid characters is defined by the | ||
499 | 121 | # Nagios server config | ||
500 | 122 | # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= | ||
501 | 123 | self.description = description | ||
502 | 124 | self.check_cmd = self._locate_cmd(check_cmd) | ||
503 | 125 | |||
504 | 126 | def _locate_cmd(self, check_cmd): | ||
505 | 127 | search_path = ( | ||
506 | 128 | '/usr/lib/nagios/plugins', | ||
507 | 129 | '/usr/local/lib/nagios/plugins', | ||
508 | 130 | ) | ||
509 | 131 | parts = shlex.split(check_cmd) | ||
510 | 132 | for path in search_path: | ||
511 | 133 | if os.path.exists(os.path.join(path, parts[0])): | ||
512 | 134 | command = os.path.join(path, parts[0]) | ||
513 | 135 | if len(parts) > 1: | ||
514 | 136 | command += " " + " ".join(parts[1:]) | ||
515 | 137 | return command | ||
516 | 138 | log('Check command not found: {}'.format(parts[0])) | ||
517 | 139 | return '' | ||
518 | 140 | |||
519 | 141 | def write(self, nagios_context, hostname): | ||
520 | 142 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | ||
521 | 143 | self.command) | ||
522 | 144 | with open(nrpe_check_file, 'w') as nrpe_check_config: | ||
523 | 145 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | ||
524 | 146 | nrpe_check_config.write("command[{}]={}\n".format( | ||
525 | 147 | self.command, self.check_cmd)) | ||
526 | 148 | |||
527 | 149 | if not os.path.exists(NRPE.nagios_exportdir): | ||
528 | 150 | log('Not writing service config as {} is not accessible'.format( | ||
529 | 151 | NRPE.nagios_exportdir)) | ||
530 | 152 | else: | ||
531 | 153 | self.write_service_config(nagios_context, hostname) | ||
532 | 154 | |||
533 | 155 | def write_service_config(self, nagios_context, hostname): | ||
534 | 156 | for f in os.listdir(NRPE.nagios_exportdir): | ||
535 | 157 | if re.search('.*{}.cfg'.format(self.command), f): | ||
536 | 158 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
537 | 159 | |||
538 | 160 | templ_vars = { | ||
539 | 161 | 'nagios_hostname': hostname, | ||
540 | 162 | 'nagios_servicegroup': nagios_context, | ||
541 | 163 | 'description': self.description, | ||
542 | 164 | 'shortname': self.shortname, | ||
543 | 165 | 'command': self.command, | ||
544 | 166 | } | ||
545 | 167 | nrpe_service_text = Check.service_template.format(**templ_vars) | ||
546 | 168 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | ||
547 | 169 | NRPE.nagios_exportdir, hostname, self.command) | ||
548 | 170 | with open(nrpe_service_file, 'w') as nrpe_service_config: | ||
549 | 171 | nrpe_service_config.write(str(nrpe_service_text)) | ||
550 | 172 | |||
551 | 173 | def run(self): | ||
552 | 174 | subprocess.call(self.check_cmd) | ||
553 | 175 | |||
554 | 176 | |||
555 | 177 | class NRPE(object): | ||
556 | 178 | nagios_logdir = '/var/log/nagios' | ||
557 | 179 | nagios_exportdir = '/var/lib/nagios/export' | ||
558 | 180 | nrpe_confdir = '/etc/nagios/nrpe.d' | ||
559 | 181 | |||
560 | 182 | def __init__(self): | ||
561 | 183 | super(NRPE, self).__init__() | ||
562 | 184 | self.config = config() | ||
563 | 185 | self.nagios_context = self.config['nagios_context'] | ||
564 | 186 | self.unit_name = local_unit().replace('/', '-') | ||
565 | 187 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
566 | 188 | self.checks = [] | ||
567 | 189 | |||
568 | 190 | def add_check(self, *args, **kwargs): | ||
569 | 191 | self.checks.append(Check(*args, **kwargs)) | ||
570 | 192 | |||
571 | 193 | def write(self): | ||
572 | 194 | try: | ||
573 | 195 | nagios_uid = pwd.getpwnam('nagios').pw_uid | ||
574 | 196 | nagios_gid = grp.getgrnam('nagios').gr_gid | ||
575 | 197 | except: | ||
576 | 198 | log("Nagios user not set up, nrpe checks not updated") | ||
577 | 199 | return | ||
578 | 200 | |||
579 | 201 | if not os.path.exists(NRPE.nagios_logdir): | ||
580 | 202 | os.mkdir(NRPE.nagios_logdir) | ||
581 | 203 | os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) | ||
582 | 204 | |||
583 | 205 | nrpe_monitors = {} | ||
584 | 206 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} | ||
585 | 207 | for nrpecheck in self.checks: | ||
586 | 208 | nrpecheck.write(self.nagios_context, self.hostname) | ||
587 | 209 | nrpe_monitors[nrpecheck.shortname] = { | ||
588 | 210 | "command": nrpecheck.command, | ||
589 | 211 | } | ||
590 | 212 | |||
591 | 213 | service('restart', 'nagios-nrpe-server') | ||
592 | 214 | |||
593 | 215 | for rid in relation_ids("local-monitors"): | ||
594 | 216 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) | ||
595 | 217 | 0 | ||
596 | === removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' | |||
597 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-11-26 17:12:54 +0000 | |||
598 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 | |||
599 | @@ -1,156 +0,0 @@ | |||
600 | 1 | ''' | ||
601 | 2 | Functions for managing volumes in juju units. One volume is supported per unit. | ||
602 | 3 | Subordinates may have their own storage, provided it is on its own partition. | ||
603 | 4 | |||
604 | 5 | Configuration stanzas: | ||
605 | 6 | volume-ephemeral: | ||
606 | 7 | type: boolean | ||
607 | 8 | default: true | ||
608 | 9 | description: > | ||
609 | 10 | If false, a volume is mounted as sepecified in "volume-map" | ||
610 | 11 | If true, ephemeral storage will be used, meaning that log data | ||
611 | 12 | will only exist as long as the machine. YOU HAVE BEEN WARNED. | ||
612 | 13 | volume-map: | ||
613 | 14 | type: string | ||
614 | 15 | default: {} | ||
615 | 16 | description: > | ||
616 | 17 | YAML map of units to device names, e.g: | ||
617 | 18 | "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" | ||
618 | 19 | Service units will raise a configure-error if volume-ephemeral | ||
619 | 20 | is 'true' and no volume-map value is set. Use 'juju set' to set a | ||
620 | 21 | value and 'juju resolved' to complete configuration. | ||
621 | 22 | |||
622 | 23 | Usage: | ||
623 | 24 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | ||
624 | 25 | from charmsupport.hookenv import log, ERROR | ||
625 | 26 | def post_mount_hook(): | ||
626 | 27 | stop_service('myservice') | ||
627 | 28 | def post_mount_hook(): | ||
628 | 29 | start_service('myservice') | ||
629 | 30 | |||
630 | 31 | if __name__ == '__main__': | ||
631 | 32 | try: | ||
632 | 33 | configure_volume(before_change=pre_mount_hook, | ||
633 | 34 | after_change=post_mount_hook) | ||
634 | 35 | except VolumeConfigurationError: | ||
635 | 36 | log('Storage could not be configured', ERROR) | ||
636 | 37 | ''' | ||
637 | 38 | |||
638 | 39 | # XXX: Known limitations | ||
639 | 40 | # - fstab is neither consulted nor updated | ||
640 | 41 | |||
641 | 42 | import os | ||
642 | 43 | from charmhelpers.core import hookenv | ||
643 | 44 | from charmhelpers.core import host | ||
644 | 45 | import yaml | ||
645 | 46 | |||
646 | 47 | |||
647 | 48 | MOUNT_BASE = '/srv/juju/volumes' | ||
648 | 49 | |||
649 | 50 | |||
class VolumeConfigurationError(Exception):
    '''Volume configuration data is missing or invalid'''
    # Raised by get_config (invalid config), mount_volume/unmount_volume
    # (mount/umount failure) and configure_volume (unreadable config).
    pass
653 | 54 | |||
654 | 55 | |||
def get_config():
    '''Gather and sanity-check volume configuration data.

    Reads the charm config options 'volume-ephemeral' (boolean-ish) and
    'volume-map' (YAML map of unit name -> device path).

    Returns a dict with keys 'ephemeral' (bool), 'device' (str or None) and
    'mountpoint' (str), or None when the configuration is invalid.
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # Pre-initialise so a YAML parse failure below cannot leave volume_map
    # unbound (the original code raised NameError in that case).
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)), hookenv.ERROR)
        errors = True
        # Fall back to an empty dict so the .get() below cannot crash on a
        # non-dict value (e.g. a YAML list).
        volume_map = {}

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for persistent storage but did not define volume ID
        hookenv.log('Persistent storage was requested, but there is no '
                    'volume defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
699 | 100 | |||
700 | 101 | |||
def mount_volume(config):
    '''Mount config['device'] at config['mountpoint'].

    Creates the mountpoint directory when needed and unmounts any existing
    mount first. Raises VolumeConfigurationError when the mountpoint is not
    a directory or the mount fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.exists(mountpoint):
        host.mkdir(mountpoint)
    elif not os.path.isdir(mountpoint):
        hookenv.log('Not a directory: {}'.format(mountpoint))
        raise VolumeConfigurationError()
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    if not host.mount(config['device'], mountpoint, persist=True):
        raise VolumeConfigurationError()
712 | 113 | |||
713 | 114 | |||
def unmount_volume(config):
    '''Unmount config['mountpoint'] if it is currently mounted.

    Raises VolumeConfigurationError when the umount fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
718 | 119 | |||
719 | 120 | |||
def managed_mounts():
    '''List of all mounted managed volumes (mounts under MOUNT_BASE)'''
    return [entry for entry in host.mounts()
            if entry[0].startswith(MOUNT_BASE)]
723 | 124 | |||
724 | 125 | |||
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.
    '''

    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    if config['ephemeral']:
        # Ephemeral requested: make sure any previously managed mount is
        # removed, with the hooks bracketing the change.
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            mounts = dict(managed_mounts())
            # Remount only when the mounted device differs from the one now
            # configured for this unit.
            if mounts.get(config['mountpoint']) != config['device']:
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
756 | 157 | 0 | ||
757 | === removed directory 'hooks/charmhelpers/contrib/hahelpers' | |||
758 | === removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py' | |||
759 | === removed file 'hooks/charmhelpers/contrib/hahelpers/apache.py' | |||
760 | --- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-05-09 20:11:59 +0000 | |||
761 | +++ hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000 | |||
762 | @@ -1,59 +0,0 @@ | |||
763 | 1 | # | ||
764 | 2 | # Copyright 2012 Canonical Ltd. | ||
765 | 3 | # | ||
766 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
767 | 5 | # | ||
768 | 6 | # Authors: | ||
769 | 7 | # James Page <james.page@ubuntu.com> | ||
770 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
771 | 9 | # | ||
772 | 10 | |||
773 | 11 | import subprocess | ||
774 | 12 | |||
775 | 13 | from charmhelpers.core.hookenv import ( | ||
776 | 14 | config as config_get, | ||
777 | 15 | relation_get, | ||
778 | 16 | relation_ids, | ||
779 | 17 | related_units as relation_list, | ||
780 | 18 | log, | ||
781 | 19 | INFO, | ||
782 | 20 | ) | ||
783 | 21 | |||
784 | 22 | |||
def get_cert():
    """Return an (ssl_cert, ssl_key) pair.

    Uses the charm config values when both are set; otherwise falls back to
    scanning identity-service relation data. Either element may be None.
    """
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            cert = cert or relation_get('ssl_cert', rid=r_id, unit=unit)
            key = key or relation_get('ssl_key', rid=r_id, unit=unit)
    return (cert, key)
801 | 39 | |||
802 | 40 | |||
def get_ca_cert():
    """Return the CA certificate from charm config, or from the first
    identity-service relation unit that provides 'ca_cert'; None if absent."""
    ca_cert = config_get('ssl_ca')
    if ca_cert is not None:
        return ca_cert
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if ca_cert is None:
                ca_cert = relation_get('ca_cert', rid=r_id, unit=unit)
    return ca_cert
814 | 52 | |||
815 | 53 | |||
def install_ca_cert(ca_cert):
    """Write *ca_cert* into the system CA store and refresh certificates.

    No-op when *ca_cert* is falsy.
    """
    if not ca_cert:
        return
    target = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
    with open(target, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
822 | 60 | 0 | ||
823 | === removed file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
824 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-05-09 20:11:59 +0000 | |||
825 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000 | |||
826 | @@ -1,183 +0,0 @@ | |||
827 | 1 | # | ||
828 | 2 | # Copyright 2012 Canonical Ltd. | ||
829 | 3 | # | ||
830 | 4 | # Authors: | ||
831 | 5 | # James Page <james.page@ubuntu.com> | ||
832 | 6 | # Adam Gandelman <adamg@ubuntu.com> | ||
833 | 7 | # | ||
834 | 8 | |||
835 | 9 | import subprocess | ||
836 | 10 | import os | ||
837 | 11 | |||
838 | 12 | from socket import gethostname as get_unit_hostname | ||
839 | 13 | |||
840 | 14 | from charmhelpers.core.hookenv import ( | ||
841 | 15 | log, | ||
842 | 16 | relation_ids, | ||
843 | 17 | related_units as relation_list, | ||
844 | 18 | relation_get, | ||
845 | 19 | config as config_get, | ||
846 | 20 | INFO, | ||
847 | 21 | ERROR, | ||
848 | 22 | unit_get, | ||
849 | 23 | ) | ||
850 | 24 | |||
851 | 25 | |||
class HAIncompleteConfig(Exception):
    """Raised by get_hacluster_config when required settings are missing."""
    pass
854 | 28 | |||
855 | 29 | |||
def is_clustered():
    """Return True if any unit on an 'ha' relation reports 'clustered'."""
    for rid in (relation_ids('ha') or []):
        for member in (relation_list(rid) or []):
            if relation_get('clustered', rid=rid, unit=member):
                return True
    return False
865 | 39 | |||
866 | 40 | |||
def is_leader(resource):
    """Return True when this unit's hostname appears in the CRM status
    output for *resource*; False when it does not or the crm call fails."""
    command_line = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(command_line)
    except subprocess.CalledProcessError:
        return False
    return get_unit_hostname() in status
881 | 55 | |||
882 | 56 | |||
def peer_units():
    """Return all units related over the 'cluster' relation."""
    return [member
            for r_id in (relation_ids('cluster') or [])
            for member in (relation_list(r_id) or [])]
889 | 63 | |||
890 | 64 | |||
def oldest_peer(peers):
    """Return True if the local unit's number is the lowest among *peers*.

    Unit numbers are taken from names of the form 'service/N'. An empty
    peer list counts as being the oldest.
    """
    local_num = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_num for peer in peers)
898 | 72 | |||
899 | 73 | |||
def eligible_leader(resource):
    """Return True when this unit should act as leader for *resource*.

    In a CRM cluster, defer to the CRM leader; otherwise defer to the
    oldest peer unit.
    """
    if is_clustered():
        if not is_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
        return True
    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
911 | 85 | |||
912 | 86 | |||
def https():
    '''
    Determine whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    wanted = ('https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert')
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            rel_state = [relation_get(key, rid=r_id, unit=unit)
                         for key in wanted]
            # NOTE: works around (LP: #1203241)
            if all(value not in (None, '') for value in rel_state):
                return True
    return False
936 | 110 | |||
937 | 111 | |||
def determine_api_port(public_port):
    '''
    Determine the correct API server listening port based on the
    existence of an HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    # Each fronting layer (haproxy, then apache for HTTPS) shifts the
    # backend down by 10 ports.
    offset = 0
    if peer_units() or is_clustered():
        offset += 1
    if https():
        offset += 1
    return public_port - (offset * 10)
953 | 127 | |||
954 | 128 | |||
def determine_apache_port(public_port):
    '''
    Determine the correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    # Only haproxy (clustered or peered) shifts the apache port down by 10.
    offset = 1 if (peer_units() or is_clustered()) else 0
    return public_port - (offset * 10)
968 | 142 | |||
969 | 143 | |||
def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = {setting: config_get(setting) for setting in settings}
    # Plain comprehension instead of the original side-effecting
    # [missing.append(...) for ...]; .items() also works on Python 3,
    # unlike the original Python-2-only .iteritems().
    missing = [name for name, value in conf.items() if value is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
990 | 164 | |||
991 | 165 | |||
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config templating object to inspect for
                                   a complete https context.
    :vip_setting: str: Setting in charm config that specifies
                       VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        # Clustered: clients must reach us via the virtual IP.
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
1010 | 184 | 0 | ||
1011 | === removed directory 'hooks/charmhelpers/contrib/jujugui' | |||
1012 | === removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py' | |||
1013 | === removed file 'hooks/charmhelpers/contrib/jujugui/utils.py' | |||
1014 | --- hooks/charmhelpers/contrib/jujugui/utils.py 2013-11-26 17:12:54 +0000 | |||
1015 | +++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000 | |||
1016 | @@ -1,602 +0,0 @@ | |||
1017 | 1 | """Juju GUI charm utilities.""" | ||
1018 | 2 | |||
1019 | 3 | __all__ = [ | ||
1020 | 4 | 'AGENT', | ||
1021 | 5 | 'APACHE', | ||
1022 | 6 | 'API_PORT', | ||
1023 | 7 | 'CURRENT_DIR', | ||
1024 | 8 | 'HAPROXY', | ||
1025 | 9 | 'IMPROV', | ||
1026 | 10 | 'JUJU_DIR', | ||
1027 | 11 | 'JUJU_GUI_DIR', | ||
1028 | 12 | 'JUJU_GUI_SITE', | ||
1029 | 13 | 'JUJU_PEM', | ||
1030 | 14 | 'WEB_PORT', | ||
1031 | 15 | 'bzr_checkout', | ||
1032 | 16 | 'chain', | ||
1033 | 17 | 'cmd_log', | ||
1034 | 18 | 'fetch_api', | ||
1035 | 19 | 'fetch_gui', | ||
1036 | 20 | 'find_missing_packages', | ||
1037 | 21 | 'first_path_in_dir', | ||
1038 | 22 | 'get_api_address', | ||
1039 | 23 | 'get_npm_cache_archive_url', | ||
1040 | 24 | 'get_release_file_url', | ||
1041 | 25 | 'get_staging_dependencies', | ||
1042 | 26 | 'get_zookeeper_address', | ||
1043 | 27 | 'legacy_juju', | ||
1044 | 28 | 'log_hook', | ||
1045 | 29 | 'merge', | ||
1046 | 30 | 'parse_source', | ||
1047 | 31 | 'prime_npm_cache', | ||
1048 | 32 | 'render_to_file', | ||
1049 | 33 | 'save_or_create_certificates', | ||
1050 | 34 | 'setup_apache', | ||
1051 | 35 | 'setup_gui', | ||
1052 | 36 | 'start_agent', | ||
1053 | 37 | 'start_gui', | ||
1054 | 38 | 'start_improv', | ||
1055 | 39 | 'write_apache_config', | ||
1056 | 40 | ] | ||
1057 | 41 | |||
1058 | 42 | from contextlib import contextmanager | ||
1059 | 43 | import errno | ||
1060 | 44 | import json | ||
1061 | 45 | import os | ||
1062 | 46 | import logging | ||
1063 | 47 | import shutil | ||
1064 | 48 | from subprocess import CalledProcessError | ||
1065 | 49 | import tempfile | ||
1066 | 50 | from urlparse import urlparse | ||
1067 | 51 | |||
1068 | 52 | import apt | ||
1069 | 53 | import tempita | ||
1070 | 54 | |||
1071 | 55 | from launchpadlib.launchpad import Launchpad | ||
1072 | 56 | from shelltoolbox import ( | ||
1073 | 57 | Serializer, | ||
1074 | 58 | apt_get_install, | ||
1075 | 59 | command, | ||
1076 | 60 | environ, | ||
1077 | 61 | install_extra_repositories, | ||
1078 | 62 | run, | ||
1079 | 63 | script_name, | ||
1080 | 64 | search_file, | ||
1081 | 65 | su, | ||
1082 | 66 | ) | ||
1083 | 67 | from charmhelpers.core.host import ( | ||
1084 | 68 | service_start, | ||
1085 | 69 | ) | ||
1086 | 70 | from charmhelpers.core.hookenv import ( | ||
1087 | 71 | log, | ||
1088 | 72 | config, | ||
1089 | 73 | unit_get, | ||
1090 | 74 | ) | ||
1091 | 75 | |||
1092 | 76 | |||
# Upstart service names managed by this charm.
AGENT = 'juju-api-agent'
APACHE = 'apache2'
IMPROV = 'juju-api-improv'
HAPROXY = 'haproxy'

# Ports: API_PORT is the websocket API backend, WEB_PORT the apache site.
API_PORT = 8080
WEB_PORT = 8000

CURRENT_DIR = os.getcwd()
JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
JUJU_PEM = 'juju.includes-private-key.pem'
BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
DEB_BUILD_DEPENDENCIES = (
    'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
)
DEB_STAGE_DEPENDENCIES = (
    'zookeeper',
)


# Store the configuration from one invocation to the next.
config_json = Serializer('/tmp/config.json')
# Bazaar checkout command.
bzr_checkout = command('bzr', 'co', '--lightweight')
# Whether or not the charm is deployed using juju-core.
# If juju-core has been used to deploy the charm, an agent.conf file must
# be present in the charm parent directory.
legacy_juju = lambda: not os.path.exists(
    os.path.join(CURRENT_DIR, '..', 'agent.conf'))
1125 | 109 | |||
1126 | 110 | |||
def _get_build_dependencies():
    """Install deb dependencies for building."""
    log('Installing build dependencies.')
    # Enable the extra PPAs first so the build packages below can resolve.
    cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
    cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
1132 | 116 | |||
1133 | 117 | |||
def get_api_address(unit_dir):
    """Return the Juju API address stored in the machine agent.conf file.

    *unit_dir* is this unit's agent directory; the machine agent directory
    is looked up next to it.

    raises IOError when no machine-* directory is found.
    """
    import yaml  # python-yaml is only installed if juju-core is used.
    # XXX 2013-03-27 frankban bug=1161443:
    # currently the uniter agent.conf file does not include the API
    # address. For now retrieve it from the machine agent file.
    base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
    for dirname in os.listdir(base_dir):
        if dirname.startswith('machine-'):
            agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
            break
    else:
        raise IOError('Juju agent configuration file not found.')
    # Use a context manager so the file is closed deterministically, and
    # safe_load so no arbitrary Python objects are constructed from the
    # config file (yaml.load is unsafe).
    with open(agent_conf) as conf_file:
        contents = yaml.safe_load(conf_file)
    return contents['apiinfo']['addrs'][0]
1149 | 133 | |||
1150 | 134 | |||
def get_staging_dependencies():
    """Install deb dependencies for the stage (improv) environment."""
    log('Installing stage dependencies.')
    # DEB_STAGE_DEPENDENCIES currently contains only zookeeper.
    cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
1155 | 139 | |||
1156 | 140 | |||
def first_path_in_dir(directory):
    """Return the full path of the first file/dir in *directory*.

    Note: os.listdir order is arbitrary; callers rely on the directory
    containing a single entry.
    """
    entries = os.listdir(directory)
    return os.path.join(directory, entries[0])
1160 | 144 | |||
1161 | 145 | |||
1162 | 146 | def _get_by_attr(collection, attr, value): | ||
1163 | 147 | """Return the first item in collection having attr == value. | ||
1164 | 148 | |||
1165 | 149 | Return None if the item is not found. | ||
1166 | 150 | """ | ||
1167 | 151 | for item in collection: | ||
1168 | 152 | if getattr(item, attr) == value: | ||
1169 | 153 | return item | ||
1170 | 154 | |||
1171 | 155 | |||
def get_release_file_url(project, series_name, release_version):
    """Return the URL of the release file hosted in Launchpad.

    The returned URL points to a release file for the given project, series
    name and release version.
    The argument *project* is a project object as returned by launchpadlib.
    The arguments *series_name* and *release_version* are strings. If
    *release_version* is None, the URL of the latest release will be returned.

    raises ValueError when the series, release or file is not found.
    """
    series = _get_by_attr(project.series, 'name', series_name)
    if series is None:
        raise ValueError('%r: series not found' % series_name)
    # Releases are returned by Launchpad in reverse date order.
    releases = list(series.releases)
    if not releases:
        raise ValueError('%r: series does not contain releases' % series_name)
    if release_version is not None:
        release = _get_by_attr(releases, 'version', release_version)
        if release is None:
            raise ValueError('%r: release not found' % release_version)
        releases = [release]
    # Only tarball (.tgz) artifacts count as release files.
    for release in releases:
        for file_ in release.files:
            if str(file_).endswith('.tgz'):
                return file_.file_link
    raise ValueError('%r: file not found' % release_version)
1198 | 182 | |||
1199 | 183 | |||
def get_zookeeper_address(agent_file_path):
    """Retrieve the Zookeeper address contained in the given *agent_file_path*.

    The *agent_file_path* is a path to a file containing a line similar to the
    following::

        env JUJU_ZOOKEEPER="address"
    """
    raw_line = search_file('JUJU_ZOOKEEPER', agent_file_path)
    quoted_address = raw_line.strip().split('=')[1]
    return quoted_address.strip('"')
1210 | 194 | |||
1211 | 195 | |||
@contextmanager
def log_hook():
    """Log when a hook starts and stops its execution.

    Also log to stdout possible CalledProcessError exceptions raised executing
    the hook.
    """
    script = script_name()
    log(">>> Entering {}".format(script))
    try:
        yield
    except CalledProcessError as err:
        log('Exception caught:')
        log(err.output)
        # Re-raise so the hook still fails after its output has been logged.
        raise
    finally:
        # Always log the exit marker, even when the hook raised.
        log("<<< Exiting {}".format(script))
1229 | 213 | |||
1230 | 214 | |||
def parse_source(source):
    """Parse the ``juju-gui-source`` option.

    Return a tuple of two elements representing info on how to deploy Juju GUI.
    Examples:
       - ('stable', None): latest stable release;
       - ('stable', '0.1.0'): stable release v0.1.0;
       - ('trunk', None): latest trunk release;
       - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
       - ('branch', 'lp:juju-gui'): release is made from a branch;
       - ('url', 'http://example.com/gui'): release from a downloaded file.
    """
    if source.startswith('url:'):
        location = source[4:]
        # Support file paths, including relative paths.
        if urlparse(location).scheme == '':
            if not location.startswith('/'):
                location = os.path.join(
                    os.path.abspath(CURRENT_DIR), location)
            location = "file://%s" % location
        return 'url', location
    if source in ('stable', 'trunk'):
        return source, None
    if source.startswith(('lp:', 'http://')):
        return 'branch', source
    return ('trunk', source) if 'build' in source else ('stable', source)
1258 | 242 | |||
1259 | 243 | |||
def render_to_file(template_name, context, destination):
    """Render the given *template_name* into *destination* using *context*.

    The tempita template language is used to render contents
    (see http://pythonpaste.org/tempita/).
    The argument *template_name* is the name or path of the template file:
    it may be either a path relative to ``../config`` or an absolute path.
    The argument *destination* is a file path.
    The argument *context* is a dict-like object.
    """
    template_path = os.path.abspath(template_name)
    template = tempita.Template.from_filename(template_path)
    # The context manager guarantees the destination file is flushed and
    # closed before any service reading it is (re)started.
    with open(destination, 'w') as stream:
        stream.write(template.substitute(context))
1274 | 258 | |||
1275 | 259 | |||
# Module-level logger handle, lazily initialized by _setupLogging().
results_log = None
1277 | 261 | |||
1278 | 262 | |||
def _setupLogging():
    # Lazily configure the module-level 'juju-gui' logger used by cmd_log,
    # writing to the file named by the 'command-log-file' charm option.
    global results_log
    if results_log is not None:
        # Already configured; logging.basicConfig must only run once.
        return
    cfg = config()
    logging.basicConfig(
        filename=cfg['command-log-file'],
        level=logging.INFO,
        format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
    results_log = logging.getLogger('juju-gui')
1289 | 273 | |||
1290 | 274 | |||
def cmd_log(results):
    """Record command output *results* in the command log file, setting up
    the logger on first use. Empty/None results are ignored."""
    global results_log
    if results:
        if results_log is None:
            _setupLogging()
        # Since 'results' may be multi-line output, start it on a separate
        # line from the logger timestamp, etc.
        results_log.info('\n%s' % results)
1300 | 284 | |||
1301 | 285 | |||
def start_improv(staging_env, ssl_cert_path,
                 config_path='/etc/init/juju-api-improv.conf'):
    """Start a simulated juju environment using ``improv.py``.

    Renders the upstart job for the improv backend and starts it as root.
    """
    log('Setting up staging start up script.')
    context = {
        'juju_dir': JUJU_DIR,
        'keys': ssl_cert_path,
        'port': API_PORT,
        'staging_env': staging_env,
    }
    render_to_file('config/juju-api-improv.conf.template', context, config_path)
    log('Starting the staging backend.')
    # Starting an upstart service requires root.
    with su('root'):
        service_start(IMPROV)
1316 | 300 | |||
1317 | 301 | |||
def start_agent(
        ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
        read_only=False):
    """Start the Juju agent and connect to the current environment.

    Renders the upstart job for the API agent (pointing it at the Zookeeper
    address found in this unit's machine start up script) and starts it.
    """
    # Retrieve the Zookeeper address from the start up script.
    unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
    agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
    zookeeper = get_zookeeper_address(agent_file)
    log('Setting up API agent start up script.')
    context = {
        'juju_dir': JUJU_DIR,
        'keys': ssl_cert_path,
        'port': API_PORT,
        'zookeeper': zookeeper,
        'read_only': read_only
    }
    render_to_file('config/juju-api-agent.conf.template', context, config_path)
    log('Starting API agent.')
    # Starting an upstart service requires root.
    with su('root'):
        service_start(AGENT)
1338 | 322 | |||
1339 | 323 | |||
def start_gui(
        console_enabled, login_help, readonly, in_staging, ssl_cert_path,
        charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
        config_js_path=None, secure=True, sandbox=False):
    """Set up and start the Juju GUI server.

    Renders the GUI config.js, the apache site and the haproxy
    configuration from the charm's templates. Note: despite the final log
    message, this function only writes configuration; it does not itself
    start any service.
    """
    with su('root'):
        run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
    # XXX 2013-02-05 frankban bug=1116320:
    # External insecure resources are still loaded when testing in the
    # debug environment. For now, switch to the production environment if
    # the charm is configured to serve tests.
    if in_staging and not serve_tests:
        build_dirname = 'build-debug'
    else:
        build_dirname = 'build-prod'
    build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
    log('Generating the Juju GUI configuration file.')
    is_legacy_juju = legacy_juju()
    user, password = None, None
    # Default credentials only apply to the sandbox and the PyJuju staging
    # backend; real environments authenticate through the API.
    if (is_legacy_juju and in_staging) or sandbox:
        user, password = 'admin', 'admin'
    else:
        user, password = None, None

    api_backend = 'python' if is_legacy_juju else 'go'
    if secure:
        protocol = 'wss'
    else:
        log('Running in insecure mode! Port 80 will serve unencrypted.')
        protocol = 'ws'

    context = {
        'raw_protocol': protocol,
        'address': unit_get('public-address'),
        'console_enabled': json.dumps(console_enabled),
        'login_help': json.dumps(login_help),
        'password': json.dumps(password),
        'api_backend': json.dumps(api_backend),
        'readonly': json.dumps(readonly),
        'user': json.dumps(user),
        'protocol': json.dumps(protocol),
        'sandbox': json.dumps(sandbox),
        'charmworld_url': json.dumps(charmworld_url),
    }
    if config_js_path is None:
        config_js_path = os.path.join(
            build_dir, 'juju-ui', 'assets', 'config.js')
    render_to_file('config/config.js.template', context, config_js_path)

    write_apache_config(build_dir, serve_tests)

    log('Generating haproxy configuration file.')
    if is_legacy_juju:
        # The PyJuju API agent is listening on localhost.
        api_address = '127.0.0.1:{0}'.format(API_PORT)
    else:
        # Retrieve the juju-core API server address.
        api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
    context = {
        'api_address': api_address,
        'api_pem': JUJU_PEM,
        'legacy_juju': is_legacy_juju,
        'ssl_cert_path': ssl_cert_path,
        # In PyJuju environments, use the same certificate for both HTTPS and
        # WebSocket connections. In juju-core the system already has the proper
        # certificate installed.
        'web_pem': JUJU_PEM,
        'web_port': WEB_PORT,
        'secure': secure
    }
    render_to_file('config/haproxy.cfg.template', context, haproxy_path)
    log('Starting Juju GUI.')
1413 | 397 | |||
1414 | 398 | def write_apache_config(build_dir, serve_tests=False): | ||
1415 | 399 | log('Generating the apache site configuration file.') | ||
1416 | 400 | context = { | ||
1417 | 401 | 'port': WEB_PORT, | ||
1418 | 402 | 'serve_tests': serve_tests, | ||
1419 | 403 | 'server_root': build_dir, | ||
1420 | 404 | 'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''), | ||
1421 | 405 | } | ||
1422 | 406 | render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS) | ||
1423 | 407 | render_to_file('config/apache-site.template', context, JUJU_GUI_SITE) | ||
1424 | 408 | |||
1425 | 409 | |||
1426 | 410 | def get_npm_cache_archive_url(Launchpad=Launchpad): | ||
1427 | 411 | """Figure out the URL of the most recent NPM cache archive on Launchpad.""" | ||
1428 | 412 | launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production') | ||
1429 | 413 | project = launchpad.projects['juju-gui'] | ||
1430 | 414 | # Find the URL of the most recently created NPM cache archive. | ||
1431 | 415 | npm_cache_url = get_release_file_url(project, 'npm-cache', None) | ||
1432 | 416 | return npm_cache_url | ||
1433 | 417 | |||
1434 | 418 | |||
1435 | 419 | def prime_npm_cache(npm_cache_url): | ||
1436 | 420 | """Download NPM cache archive and prime the NPM cache with it.""" | ||
1437 | 421 | # Download the cache archive and then uncompress it into the NPM cache. | ||
1438 | 422 | npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz') | ||
1439 | 423 | cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url)) | ||
1440 | 424 | npm_cache_dir = os.path.expanduser('~/.npm') | ||
1441 | 425 | # The NPM cache directory probably does not exist, so make it if not. | ||
1442 | 426 | try: | ||
1443 | 427 | os.mkdir(npm_cache_dir) | ||
1444 | 428 | except OSError, e: | ||
1445 | 429 | # If the directory already exists then ignore the error. | ||
1446 | 430 | if e.errno != errno.EEXIST: # File exists. | ||
1447 | 431 | raise | ||
1448 | 432 | uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f') | ||
1449 | 433 | cmd_log(uncompress(npm_cache_archive)) | ||
1450 | 434 | |||
1451 | 435 | |||
1452 | 436 | def fetch_gui(juju_gui_source, logpath): | ||
1453 | 437 | """Retrieve the Juju GUI release/branch.""" | ||
1454 | 438 | # Retrieve a Juju GUI release. | ||
1455 | 439 | origin, version_or_branch = parse_source(juju_gui_source) | ||
1456 | 440 | if origin == 'branch': | ||
1457 | 441 | # Make sure we have the dependencies necessary for us to actually make | ||
1458 | 442 | # a build. | ||
1459 | 443 | _get_build_dependencies() | ||
1460 | 444 | # Create a release starting from a branch. | ||
1461 | 445 | juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source') | ||
1462 | 446 | log('Retrieving Juju GUI source checkout from %s.' % version_or_branch) | ||
1463 | 447 | cmd_log(run('rm', '-rf', juju_gui_source_dir)) | ||
1464 | 448 | cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir)) | ||
1465 | 449 | log('Preparing a Juju GUI release.') | ||
1466 | 450 | logdir = os.path.dirname(logpath) | ||
1467 | 451 | fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir) | ||
1468 | 452 | log('Output from "make distfile" sent to %s' % name) | ||
1469 | 453 | with environ(NO_BZR='1'): | ||
1470 | 454 | run('make', '-C', juju_gui_source_dir, 'distfile', | ||
1471 | 455 | stdout=fd, stderr=fd) | ||
1472 | 456 | release_tarball = first_path_in_dir( | ||
1473 | 457 | os.path.join(juju_gui_source_dir, 'releases')) | ||
1474 | 458 | else: | ||
1475 | 459 | log('Retrieving Juju GUI release.') | ||
1476 | 460 | if origin == 'url': | ||
1477 | 461 | file_url = version_or_branch | ||
1478 | 462 | else: | ||
1479 | 463 | # Retrieve a release from Launchpad. | ||
1480 | 464 | launchpad = Launchpad.login_anonymously( | ||
1481 | 465 | 'Juju GUI charm', 'production') | ||
1482 | 466 | project = launchpad.projects['juju-gui'] | ||
1483 | 467 | file_url = get_release_file_url(project, origin, version_or_branch) | ||
1484 | 468 | log('Downloading release file from %s.' % file_url) | ||
1485 | 469 | release_tarball = os.path.join(CURRENT_DIR, 'release.tgz') | ||
1486 | 470 | cmd_log(run('curl', '-L', '-o', release_tarball, file_url)) | ||
1487 | 471 | return release_tarball | ||
1488 | 472 | |||
1489 | 473 | |||
1490 | 474 | def fetch_api(juju_api_branch): | ||
1491 | 475 | """Retrieve the Juju branch.""" | ||
1492 | 476 | # Retrieve Juju API source checkout. | ||
1493 | 477 | log('Retrieving Juju API source checkout.') | ||
1494 | 478 | cmd_log(run('rm', '-rf', JUJU_DIR)) | ||
1495 | 479 | cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR)) | ||
1496 | 480 | |||
1497 | 481 | |||
1498 | 482 | def setup_gui(release_tarball): | ||
1499 | 483 | """Set up Juju GUI.""" | ||
1500 | 484 | # Uncompress the release tarball. | ||
1501 | 485 | log('Installing Juju GUI.') | ||
1502 | 486 | release_dir = os.path.join(CURRENT_DIR, 'release') | ||
1503 | 487 | cmd_log(run('rm', '-rf', release_dir)) | ||
1504 | 488 | os.mkdir(release_dir) | ||
1505 | 489 | uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f') | ||
1506 | 490 | cmd_log(uncompress(release_tarball)) | ||
1507 | 491 | # Link the Juju GUI dir to the contents of the release tarball. | ||
1508 | 492 | cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR)) | ||
1509 | 493 | |||
1510 | 494 | |||
1511 | 495 | def setup_apache(): | ||
1512 | 496 | """Set up apache.""" | ||
1513 | 497 | log('Setting up apache.') | ||
1514 | 498 | if not os.path.exists(JUJU_GUI_SITE): | ||
1515 | 499 | cmd_log(run('touch', JUJU_GUI_SITE)) | ||
1516 | 500 | cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE)) | ||
1517 | 501 | cmd_log( | ||
1518 | 502 | run('ln', '-s', JUJU_GUI_SITE, | ||
1519 | 503 | '/etc/apache2/sites-enabled/juju-gui')) | ||
1520 | 504 | |||
1521 | 505 | if not os.path.exists(JUJU_GUI_PORTS): | ||
1522 | 506 | cmd_log(run('touch', JUJU_GUI_PORTS)) | ||
1523 | 507 | cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS)) | ||
1524 | 508 | |||
1525 | 509 | with su('root'): | ||
1526 | 510 | run('a2dissite', 'default') | ||
1527 | 511 | run('a2ensite', 'juju-gui') | ||
1528 | 512 | |||
1529 | 513 | |||
1530 | 514 | def save_or_create_certificates( | ||
1531 | 515 | ssl_cert_path, ssl_cert_contents, ssl_key_contents): | ||
1532 | 516 | """Generate the SSL certificates. | ||
1533 | 517 | |||
1534 | 518 | If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them | ||
1535 | 519 | as certificates; otherwise, generate them. | ||
1536 | 520 | |||
1537 | 521 | Also create a pem file, suitable for use in the haproxy configuration, | ||
1538 | 522 | concatenating the key and the certificate files. | ||
1539 | 523 | """ | ||
1540 | 524 | crt_path = os.path.join(ssl_cert_path, 'juju.crt') | ||
1541 | 525 | key_path = os.path.join(ssl_cert_path, 'juju.key') | ||
1542 | 526 | if not os.path.exists(ssl_cert_path): | ||
1543 | 527 | os.makedirs(ssl_cert_path) | ||
1544 | 528 | if ssl_cert_contents and ssl_key_contents: | ||
1545 | 529 | # Save the provided certificates. | ||
1546 | 530 | with open(crt_path, 'w') as cert_file: | ||
1547 | 531 | cert_file.write(ssl_cert_contents) | ||
1548 | 532 | with open(key_path, 'w') as key_file: | ||
1549 | 533 | key_file.write(ssl_key_contents) | ||
1550 | 534 | else: | ||
1551 | 535 | # Generate certificates. | ||
1552 | 536 | # See http://superuser.com/questions/226192/openssl-without-prompt | ||
1553 | 537 | cmd_log(run( | ||
1554 | 538 | 'openssl', 'req', '-new', '-newkey', 'rsa:4096', | ||
1555 | 539 | '-days', '365', '-nodes', '-x509', '-subj', | ||
1556 | 540 | # These are arbitrary test values for the certificate. | ||
1557 | 541 | '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com', | ||
1558 | 542 | '-keyout', key_path, '-out', crt_path)) | ||
1559 | 543 | # Generate the pem file. | ||
1560 | 544 | pem_path = os.path.join(ssl_cert_path, JUJU_PEM) | ||
1561 | 545 | if os.path.exists(pem_path): | ||
1562 | 546 | os.remove(pem_path) | ||
1563 | 547 | with open(pem_path, 'w') as pem_file: | ||
1564 | 548 | shutil.copyfileobj(open(key_path), pem_file) | ||
1565 | 549 | shutil.copyfileobj(open(crt_path), pem_file) | ||
1566 | 550 | |||
1567 | 551 | |||
1568 | 552 | def find_missing_packages(*packages): | ||
1569 | 553 | """Given a list of packages, return the packages which are not installed. | ||
1570 | 554 | """ | ||
1571 | 555 | cache = apt.Cache() | ||
1572 | 556 | missing = set() | ||
1573 | 557 | for pkg_name in packages: | ||
1574 | 558 | try: | ||
1575 | 559 | pkg = cache[pkg_name] | ||
1576 | 560 | except KeyError: | ||
1577 | 561 | missing.add(pkg_name) | ||
1578 | 562 | continue | ||
1579 | 563 | if pkg.is_installed: | ||
1580 | 564 | continue | ||
1581 | 565 | missing.add(pkg_name) | ||
1582 | 566 | return missing | ||
1583 | 567 | |||
1584 | 568 | |||
1585 | 569 | ## Backend support decorators | ||
1586 | 570 | |||
1587 | 571 | def chain(name): | ||
1588 | 572 | """Helper method to compose a set of mixin objects into a callable. | ||
1589 | 573 | |||
1590 | 574 | Each method is called in the context of its mixin instance, and its | ||
1591 | 575 | argument is the Backend instance. | ||
1592 | 576 | """ | ||
1593 | 577 | # Chain method calls through all implementing mixins. | ||
1594 | 578 | def method(self): | ||
1595 | 579 | for mixin in self.mixins: | ||
1596 | 580 | a_callable = getattr(type(mixin), name, None) | ||
1597 | 581 | if a_callable: | ||
1598 | 582 | a_callable(mixin, self) | ||
1599 | 583 | |||
1600 | 584 | method.__name__ = name | ||
1601 | 585 | return method | ||
1602 | 586 | |||
1603 | 587 | |||
1604 | 588 | def merge(name): | ||
1605 | 589 | """Helper to merge a property from a set of strategy objects | ||
1606 | 590 | into a unified set. | ||
1607 | 591 | """ | ||
1608 | 592 | # Return merged property from every providing mixin as a set. | ||
1609 | 593 | @property | ||
1610 | 594 | def method(self): | ||
1611 | 595 | result = set() | ||
1612 | 596 | for mixin in self.mixins: | ||
1613 | 597 | segment = getattr(type(mixin), name, None) | ||
1614 | 598 | if segment and isinstance(segment, (list, tuple, set)): | ||
1615 | 599 | result |= set(segment) | ||
1616 | 600 | |||
1617 | 601 | return result | ||
1618 | 602 | return method | ||
1619 | 603 | 0 | ||
1620 | === removed directory 'hooks/charmhelpers/contrib/network' | |||
1621 | === removed file 'hooks/charmhelpers/contrib/network/__init__.py' | |||
1622 | === removed file 'hooks/charmhelpers/contrib/network/ip.py' | |||
1623 | --- hooks/charmhelpers/contrib/network/ip.py 2014-05-09 20:11:59 +0000 | |||
1624 | +++ hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 | |||
1625 | @@ -1,69 +0,0 @@ | |||
1626 | 1 | import sys | ||
1627 | 2 | |||
1628 | 3 | from charmhelpers.fetch import apt_install | ||
1629 | 4 | from charmhelpers.core.hookenv import ( | ||
1630 | 5 | ERROR, log, | ||
1631 | 6 | ) | ||
1632 | 7 | |||
1633 | 8 | try: | ||
1634 | 9 | import netifaces | ||
1635 | 10 | except ImportError: | ||
1636 | 11 | apt_install('python-netifaces') | ||
1637 | 12 | import netifaces | ||
1638 | 13 | |||
1639 | 14 | try: | ||
1640 | 15 | import netaddr | ||
1641 | 16 | except ImportError: | ||
1642 | 17 | apt_install('python-netaddr') | ||
1643 | 18 | import netaddr | ||
1644 | 19 | |||
1645 | 20 | |||
1646 | 21 | def _validate_cidr(network): | ||
1647 | 22 | try: | ||
1648 | 23 | netaddr.IPNetwork(network) | ||
1649 | 24 | except (netaddr.core.AddrFormatError, ValueError): | ||
1650 | 25 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
1651 | 26 | network) | ||
1652 | 27 | |||
1653 | 28 | |||
1654 | 29 | def get_address_in_network(network, fallback=None, fatal=False): | ||
1655 | 30 | """ | ||
1656 | 31 | Get an IPv4 address within the network from the host. | ||
1657 | 32 | |||
1658 | 33 | Args: | ||
1659 | 34 | network (str): CIDR presentation format. For example, | ||
1660 | 35 | '192.168.1.0/24'. | ||
1661 | 36 | fallback (str): If no address is found, return fallback. | ||
1662 | 37 | fatal (boolean): If no address is found, fallback is not | ||
1663 | 38 | set and fatal is True then exit(1). | ||
1664 | 39 | """ | ||
1665 | 40 | |||
1666 | 41 | def not_found_error_out(): | ||
1667 | 42 | log("No IP address found in network: %s" % network, | ||
1668 | 43 | level=ERROR) | ||
1669 | 44 | sys.exit(1) | ||
1670 | 45 | |||
1671 | 46 | if network is None: | ||
1672 | 47 | if fallback is not None: | ||
1673 | 48 | return fallback | ||
1674 | 49 | else: | ||
1675 | 50 | if fatal: | ||
1676 | 51 | not_found_error_out() | ||
1677 | 52 | |||
1678 | 53 | _validate_cidr(network) | ||
1679 | 54 | for iface in netifaces.interfaces(): | ||
1680 | 55 | addresses = netifaces.ifaddresses(iface) | ||
1681 | 56 | if netifaces.AF_INET in addresses: | ||
1682 | 57 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
1683 | 58 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
1684 | 59 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
1685 | 60 | if cidr in netaddr.IPNetwork(network): | ||
1686 | 61 | return str(cidr.ip) | ||
1687 | 62 | |||
1688 | 63 | if fallback is not None: | ||
1689 | 64 | return fallback | ||
1690 | 65 | |||
1691 | 66 | if fatal: | ||
1692 | 67 | not_found_error_out() | ||
1693 | 68 | |||
1694 | 69 | return None | ||
1695 | 70 | 0 | ||
1696 | === removed directory 'hooks/charmhelpers/contrib/network/ovs' | |||
1697 | === removed file 'hooks/charmhelpers/contrib/network/ovs/__init__.py' | |||
1698 | --- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-11-26 17:12:54 +0000 | |||
1699 | +++ hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000 | |||
1700 | @@ -1,75 +0,0 @@ | |||
1701 | 1 | ''' Helpers for interacting with OpenvSwitch ''' | ||
1702 | 2 | import subprocess | ||
1703 | 3 | import os | ||
1704 | 4 | from charmhelpers.core.hookenv import ( | ||
1705 | 5 | log, WARNING | ||
1706 | 6 | ) | ||
1707 | 7 | from charmhelpers.core.host import ( | ||
1708 | 8 | service | ||
1709 | 9 | ) | ||
1710 | 10 | |||
1711 | 11 | |||
1712 | 12 | def add_bridge(name): | ||
1713 | 13 | ''' Add the named bridge to openvswitch ''' | ||
1714 | 14 | log('Creating bridge {}'.format(name)) | ||
1715 | 15 | subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) | ||
1716 | 16 | |||
1717 | 17 | |||
1718 | 18 | def del_bridge(name): | ||
1719 | 19 | ''' Delete the named bridge from openvswitch ''' | ||
1720 | 20 | log('Deleting bridge {}'.format(name)) | ||
1721 | 21 | subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name]) | ||
1722 | 22 | |||
1723 | 23 | |||
1724 | 24 | def add_bridge_port(name, port): | ||
1725 | 25 | ''' Add a port to the named openvswitch bridge ''' | ||
1726 | 26 | log('Adding port {} to bridge {}'.format(port, name)) | ||
1727 | 27 | subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port", | ||
1728 | 28 | name, port]) | ||
1729 | 29 | subprocess.check_call(["ip", "link", "set", port, "up"]) | ||
1730 | 30 | |||
1731 | 31 | |||
1732 | 32 | def del_bridge_port(name, port): | ||
1733 | 33 | ''' Delete a port from the named openvswitch bridge ''' | ||
1734 | 34 | log('Deleting port {} from bridge {}'.format(port, name)) | ||
1735 | 35 | subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", | ||
1736 | 36 | name, port]) | ||
1737 | 37 | subprocess.check_call(["ip", "link", "set", port, "down"]) | ||
1738 | 38 | |||
1739 | 39 | |||
1740 | 40 | def set_manager(manager): | ||
1741 | 41 | ''' Set the controller for the local openvswitch ''' | ||
1742 | 42 | log('Setting manager for local ovs to {}'.format(manager)) | ||
1743 | 43 | subprocess.check_call(['ovs-vsctl', 'set-manager', | ||
1744 | 44 | 'ssl:{}'.format(manager)]) | ||
1745 | 45 | |||
1746 | 46 | |||
1747 | 47 | CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem' | ||
1748 | 48 | |||
1749 | 49 | |||
1750 | 50 | def get_certificate(): | ||
1751 | 51 | ''' Read openvswitch certificate from disk ''' | ||
1752 | 52 | if os.path.exists(CERT_PATH): | ||
1753 | 53 | log('Reading ovs certificate from {}'.format(CERT_PATH)) | ||
1754 | 54 | with open(CERT_PATH, 'r') as cert: | ||
1755 | 55 | full_cert = cert.read() | ||
1756 | 56 | begin_marker = "-----BEGIN CERTIFICATE-----" | ||
1757 | 57 | end_marker = "-----END CERTIFICATE-----" | ||
1758 | 58 | begin_index = full_cert.find(begin_marker) | ||
1759 | 59 | end_index = full_cert.rfind(end_marker) | ||
1760 | 60 | if end_index == -1 or begin_index == -1: | ||
1761 | 61 | raise RuntimeError("Certificate does not contain valid begin" | ||
1762 | 62 | " and end markers.") | ||
1763 | 63 | full_cert = full_cert[begin_index:(end_index + len(end_marker))] | ||
1764 | 64 | return full_cert | ||
1765 | 65 | else: | ||
1766 | 66 | log('Certificate not found', level=WARNING) | ||
1767 | 67 | return None | ||
1768 | 68 | |||
1769 | 69 | |||
1770 | 70 | def full_restart(): | ||
1771 | 71 | ''' Full restart and reload of openvswitch ''' | ||
1772 | 72 | if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'): | ||
1773 | 73 | service('start', 'openvswitch-force-reload-kmod') | ||
1774 | 74 | else: | ||
1775 | 75 | service('force-reload-kmod', 'openvswitch-switch') | ||
1776 | 76 | 0 | ||
1777 | === removed directory 'hooks/charmhelpers/contrib/openstack' | |||
1778 | === removed file 'hooks/charmhelpers/contrib/openstack/__init__.py' | |||
1779 | === removed file 'hooks/charmhelpers/contrib/openstack/alternatives.py' | |||
1780 | --- hooks/charmhelpers/contrib/openstack/alternatives.py 2013-11-26 17:12:54 +0000 | |||
1781 | +++ hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000 | |||
1782 | @@ -1,17 +0,0 @@ | |||
1783 | 1 | ''' Helper for managing alternatives for file conflict resolution ''' | ||
1784 | 2 | |||
1785 | 3 | import subprocess | ||
1786 | 4 | import shutil | ||
1787 | 5 | import os | ||
1788 | 6 | |||
1789 | 7 | |||
1790 | 8 | def install_alternative(name, target, source, priority=50): | ||
1791 | 9 | ''' Install alternative configuration ''' | ||
1792 | 10 | if (os.path.exists(target) and not os.path.islink(target)): | ||
1793 | 11 | # Move existing file/directory away before installing | ||
1794 | 12 | shutil.move(target, '{}.bak'.format(target)) | ||
1795 | 13 | cmd = [ | ||
1796 | 14 | 'update-alternatives', '--force', '--install', | ||
1797 | 15 | target, name, source, str(priority) | ||
1798 | 16 | ] | ||
1799 | 17 | subprocess.check_call(cmd) | ||
1800 | 18 | 0 | ||
1801 | === removed file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
1802 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-05-09 20:11:59 +0000 | |||
1803 | +++ hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000 | |||
1804 | @@ -1,700 +0,0 @@ | |||
1805 | 1 | import json | ||
1806 | 2 | import os | ||
1807 | 3 | import time | ||
1808 | 4 | |||
1809 | 5 | from base64 import b64decode | ||
1810 | 6 | |||
1811 | 7 | from subprocess import ( | ||
1812 | 8 | check_call | ||
1813 | 9 | ) | ||
1814 | 10 | |||
1815 | 11 | |||
1816 | 12 | from charmhelpers.fetch import ( | ||
1817 | 13 | apt_install, | ||
1818 | 14 | filter_installed_packages, | ||
1819 | 15 | ) | ||
1820 | 16 | |||
1821 | 17 | from charmhelpers.core.hookenv import ( | ||
1822 | 18 | config, | ||
1823 | 19 | local_unit, | ||
1824 | 20 | log, | ||
1825 | 21 | relation_get, | ||
1826 | 22 | relation_ids, | ||
1827 | 23 | related_units, | ||
1828 | 24 | unit_get, | ||
1829 | 25 | unit_private_ip, | ||
1830 | 26 | ERROR, | ||
1831 | 27 | ) | ||
1832 | 28 | |||
1833 | 29 | from charmhelpers.contrib.hahelpers.cluster import ( | ||
1834 | 30 | determine_apache_port, | ||
1835 | 31 | determine_api_port, | ||
1836 | 32 | https, | ||
1837 | 33 | is_clustered | ||
1838 | 34 | ) | ||
1839 | 35 | |||
1840 | 36 | from charmhelpers.contrib.hahelpers.apache import ( | ||
1841 | 37 | get_cert, | ||
1842 | 38 | get_ca_cert, | ||
1843 | 39 | ) | ||
1844 | 40 | |||
1845 | 41 | from charmhelpers.contrib.openstack.neutron import ( | ||
1846 | 42 | neutron_plugin_attribute, | ||
1847 | 43 | ) | ||
1848 | 44 | |||
1849 | 45 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | ||
1850 | 46 | |||
1851 | 47 | |||
1852 | 48 | class OSContextError(Exception): | ||
1853 | 49 | pass | ||
1854 | 50 | |||
1855 | 51 | |||
1856 | 52 | def ensure_packages(packages): | ||
1857 | 53 | '''Install but do not upgrade required plugin packages''' | ||
1858 | 54 | required = filter_installed_packages(packages) | ||
1859 | 55 | if required: | ||
1860 | 56 | apt_install(required, fatal=True) | ||
1861 | 57 | |||
1862 | 58 | |||
1863 | 59 | def context_complete(ctxt): | ||
1864 | 60 | _missing = [] | ||
1865 | 61 | for k, v in ctxt.iteritems(): | ||
1866 | 62 | if v is None or v == '': | ||
1867 | 63 | _missing.append(k) | ||
1868 | 64 | if _missing: | ||
1869 | 65 | log('Missing required data: %s' % ' '.join(_missing), level='INFO') | ||
1870 | 66 | return False | ||
1871 | 67 | return True | ||
1872 | 68 | |||
1873 | 69 | |||
1874 | 70 | def config_flags_parser(config_flags): | ||
1875 | 71 | if config_flags.find('==') >= 0: | ||
1876 | 72 | log("config_flags is not in expected format (key=value)", | ||
1877 | 73 | level=ERROR) | ||
1878 | 74 | raise OSContextError | ||
1879 | 75 | # strip the following from each value. | ||
1880 | 76 | post_strippers = ' ,' | ||
1881 | 77 | # we strip any leading/trailing '=' or ' ' from the string then | ||
1882 | 78 | # split on '='. | ||
1883 | 79 | split = config_flags.strip(' =').split('=') | ||
1884 | 80 | limit = len(split) | ||
1885 | 81 | flags = {} | ||
1886 | 82 | for i in xrange(0, limit - 1): | ||
1887 | 83 | current = split[i] | ||
1888 | 84 | next = split[i + 1] | ||
1889 | 85 | vindex = next.rfind(',') | ||
1890 | 86 | if (i == limit - 2) or (vindex < 0): | ||
1891 | 87 | value = next | ||
1892 | 88 | else: | ||
1893 | 89 | value = next[:vindex] | ||
1894 | 90 | |||
1895 | 91 | if i == 0: | ||
1896 | 92 | key = current | ||
1897 | 93 | else: | ||
1898 | 94 | # if this not the first entry, expect an embedded key. | ||
1899 | 95 | index = current.rfind(',') | ||
1900 | 96 | if index < 0: | ||
1901 | 97 | log("invalid config value(s) at index %s" % (i), | ||
1902 | 98 | level=ERROR) | ||
1903 | 99 | raise OSContextError | ||
1904 | 100 | key = current[index + 1:] | ||
1905 | 101 | |||
1906 | 102 | # Add to collection. | ||
1907 | 103 | flags[key.strip(post_strippers)] = value.rstrip(post_strippers) | ||
1908 | 104 | return flags | ||
1909 | 105 | |||
1910 | 106 | |||
1911 | 107 | class OSContextGenerator(object): | ||
1912 | 108 | interfaces = [] | ||
1913 | 109 | |||
1914 | 110 | def __call__(self): | ||
1915 | 111 | raise NotImplementedError | ||
1916 | 112 | |||
1917 | 113 | |||
1918 | 114 | class SharedDBContext(OSContextGenerator): | ||
1919 | 115 | interfaces = ['shared-db'] | ||
1920 | 116 | |||
1921 | 117 | def __init__(self, | ||
1922 | 118 | database=None, user=None, relation_prefix=None, ssl_dir=None): | ||
1923 | 119 | ''' | ||
1924 | 120 | Allows inspecting relation for settings prefixed with relation_prefix. | ||
1925 | 121 | This is useful for parsing access for multiple databases returned via | ||
1926 | 122 | the shared-db interface (eg, nova_password, quantum_password) | ||
1927 | 123 | ''' | ||
1928 | 124 | self.relation_prefix = relation_prefix | ||
1929 | 125 | self.database = database | ||
1930 | 126 | self.user = user | ||
1931 | 127 | self.ssl_dir = ssl_dir | ||
1932 | 128 | |||
1933 | 129 | def __call__(self): | ||
1934 | 130 | self.database = self.database or config('database') | ||
1935 | 131 | self.user = self.user or config('database-user') | ||
1936 | 132 | if None in [self.database, self.user]: | ||
1937 | 133 | log('Could not generate shared_db context. ' | ||
1938 | 134 | 'Missing required charm config options. ' | ||
1939 | 135 | '(database name and user)') | ||
1940 | 136 | raise OSContextError | ||
1941 | 137 | ctxt = {} | ||
1942 | 138 | |||
1943 | 139 | password_setting = 'password' | ||
1944 | 140 | if self.relation_prefix: | ||
1945 | 141 | password_setting = self.relation_prefix + '_password' | ||
1946 | 142 | |||
1947 | 143 | for rid in relation_ids('shared-db'): | ||
1948 | 144 | for unit in related_units(rid): | ||
1949 | 145 | rdata = relation_get(rid=rid, unit=unit) | ||
1950 | 146 | ctxt = { | ||
1951 | 147 | 'database_host': rdata.get('db_host'), | ||
1952 | 148 | 'database': self.database, | ||
1953 | 149 | 'database_user': self.user, | ||
1954 | 150 | 'database_password': rdata.get(password_setting), | ||
1955 | 151 | 'database_type': 'mysql' | ||
1956 | 152 | } | ||
1957 | 153 | if context_complete(ctxt): | ||
1958 | 154 | db_ssl(rdata, ctxt, self.ssl_dir) | ||
1959 | 155 | return ctxt | ||
1960 | 156 | return {} | ||
1961 | 157 | |||
1962 | 158 | |||
1963 | 159 | class PostgresqlDBContext(OSContextGenerator): | ||
1964 | 160 | interfaces = ['pgsql-db'] | ||
1965 | 161 | |||
1966 | 162 | def __init__(self, database=None): | ||
1967 | 163 | self.database = database | ||
1968 | 164 | |||
1969 | 165 | def __call__(self): | ||
1970 | 166 | self.database = self.database or config('database') | ||
1971 | 167 | if self.database is None: | ||
1972 | 168 | log('Could not generate postgresql_db context. ' | ||
1973 | 169 | 'Missing required charm config options. ' | ||
1974 | 170 | '(database name)') | ||
1975 | 171 | raise OSContextError | ||
1976 | 172 | ctxt = {} | ||
1977 | 173 | |||
1978 | 174 | for rid in relation_ids(self.interfaces[0]): | ||
1979 | 175 | for unit in related_units(rid): | ||
1980 | 176 | ctxt = { | ||
1981 | 177 | 'database_host': relation_get('host', rid=rid, unit=unit), | ||
1982 | 178 | 'database': self.database, | ||
1983 | 179 | 'database_user': relation_get('user', rid=rid, unit=unit), | ||
1984 | 180 | 'database_password': relation_get('password', rid=rid, unit=unit), | ||
1985 | 181 | 'database_type': 'postgresql', | ||
1986 | 182 | } | ||
1987 | 183 | if context_complete(ctxt): | ||
1988 | 184 | return ctxt | ||
1989 | 185 | return {} | ||
1990 | 186 | |||
1991 | 187 | |||
1992 | 188 | def db_ssl(rdata, ctxt, ssl_dir): | ||
1993 | 189 | if 'ssl_ca' in rdata and ssl_dir: | ||
1994 | 190 | ca_path = os.path.join(ssl_dir, 'db-client.ca') | ||
1995 | 191 | with open(ca_path, 'w') as fh: | ||
1996 | 192 | fh.write(b64decode(rdata['ssl_ca'])) | ||
1997 | 193 | ctxt['database_ssl_ca'] = ca_path | ||
1998 | 194 | elif 'ssl_ca' in rdata: | ||
1999 | 195 | log("Charm not setup for ssl support but ssl ca found") | ||
2000 | 196 | return ctxt | ||
2001 | 197 | if 'ssl_cert' in rdata: | ||
2002 | 198 | cert_path = os.path.join( | ||
2003 | 199 | ssl_dir, 'db-client.cert') | ||
2004 | 200 | if not os.path.exists(cert_path): | ||
2005 | 201 | log("Waiting 1m for ssl client cert validity") | ||
2006 | 202 | time.sleep(60) | ||
2007 | 203 | with open(cert_path, 'w') as fh: | ||
2008 | 204 | fh.write(b64decode(rdata['ssl_cert'])) | ||
2009 | 205 | ctxt['database_ssl_cert'] = cert_path | ||
2010 | 206 | key_path = os.path.join(ssl_dir, 'db-client.key') | ||
2011 | 207 | with open(key_path, 'w') as fh: | ||
2012 | 208 | fh.write(b64decode(rdata['ssl_key'])) | ||
2013 | 209 | ctxt['database_ssl_key'] = key_path | ||
2014 | 210 | return ctxt | ||
2015 | 211 | |||
2016 | 212 | |||
2017 | 213 | class IdentityServiceContext(OSContextGenerator): | ||
2018 | 214 | interfaces = ['identity-service'] | ||
2019 | 215 | |||
2020 | 216 | def __call__(self): | ||
2021 | 217 | log('Generating template context for identity-service') | ||
2022 | 218 | ctxt = {} | ||
2023 | 219 | |||
2024 | 220 | for rid in relation_ids('identity-service'): | ||
2025 | 221 | for unit in related_units(rid): | ||
2026 | 222 | rdata = relation_get(rid=rid, unit=unit) | ||
2027 | 223 | ctxt = { | ||
2028 | 224 | 'service_port': rdata.get('service_port'), | ||
2029 | 225 | 'service_host': rdata.get('service_host'), | ||
2030 | 226 | 'auth_host': rdata.get('auth_host'), | ||
2031 | 227 | 'auth_port': rdata.get('auth_port'), | ||
2032 | 228 | 'admin_tenant_name': rdata.get('service_tenant'), | ||
2033 | 229 | 'admin_user': rdata.get('service_username'), | ||
2034 | 230 | 'admin_password': rdata.get('service_password'), | ||
2035 | 231 | 'service_protocol': | ||
2036 | 232 | rdata.get('service_protocol') or 'http', | ||
2037 | 233 | 'auth_protocol': | ||
2038 | 234 | rdata.get('auth_protocol') or 'http', | ||
2039 | 235 | } | ||
2040 | 236 | if context_complete(ctxt): | ||
2041 | 237 | # NOTE(jamespage) this is required for >= icehouse | ||
2042 | 238 | # so a missing value just indicates keystone needs | ||
2043 | 239 | # upgrading | ||
2044 | 240 | ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') | ||
2045 | 241 | return ctxt | ||
2046 | 242 | return {} | ||
2047 | 243 | |||
2048 | 244 | |||
class AMQPContext(OSContextGenerator):
    """Context generator for a RabbitMQ (amqp) relation.

    Produces rabbitmq_user/password/host/vhost settings, plus optional
    SSL and HA details, for rendering service configuration templates.
    """
    interfaces = ['amqp']

    def __init__(self, ssl_dir=None):
        # Directory where a relation-provided CA cert is written; if None,
        # the charm is considered not set up for SSL.
        self.ssl_dir = ssl_dir

    def __call__(self):
        log('Generating template context for amqp')
        conf = config()
        try:
            # Both options are required charm config; KeyError means the
            # charm's config.yaml is incomplete.
            username = conf['rabbit-user']
            vhost = conf['rabbit-vhost']
        except KeyError as e:
            log('Could not generate shared_db context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError
        ctxt = {}
        for rid in relation_ids('amqp'):
            ha_vip_only = False
            for unit in related_units(rid):
                # Clustered rabbit advertises a VIP; otherwise fall back to
                # the unit's private address.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
                                                         unit=unit)
                else:
                    ctxt['rabbitmq_host'] = relation_get('private-address',
                                                         rid=rid, unit=unit)
                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                # Optional SSL settings published by the rabbitmq charm.
                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port
                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            # CA provided but charm has nowhere to put it;
                            # stop scanning further units of this relation.
                            log(("Charm not setup for ssl support "
                                 "but ssl ca found"))
                            break
                        # Persist the (base64-encoded) CA cert and rewrite
                        # the context value to point at the file on disk.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path
                    # Sufficient information found = break out!
                    break
            # Used for active/active rabbitmq >= grizzly
            if ('clustered' not in ctxt or ha_vip_only) \
                    and len(related_units(rid)) > 1:
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    rabbitmq_hosts.append(relation_get('private-address',
                                                       rid=rid, unit=unit))
                ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
        if not context_complete(ctxt):
            return {}
        else:
            return ctxt
2121 | 317 | |||
2122 | 318 | |||
class CephContext(OSContextGenerator):
    """Context generator for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph')

        monitors = []
        auth_mode = None
        auth_key = None
        syslog_flag = str(config('use-syslog')).lower()
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                monitors.append(relation_get('private-address',
                                             rid=rid, unit=unit))
                # auth/key end up holding whatever the last unit published.
                auth_mode = relation_get('auth', rid=rid, unit=unit)
                auth_key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(monitors),
            'auth': auth_mode,
            'key': auth_key,
            'use_syslog': syslog_flag,
        }

        # Make sure the config directory exists before anything renders
        # into it (ceph-common may not be installed yet).
        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt
2160 | 356 | |||
2161 | 357 | |||
class HAProxyContext(OSContextGenerator):
    interfaces = ['cluster']

    def __call__(self):
        """
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to
        include its own context generator that describes the port mapping.
        """
        if not relation_ids('cluster'):
            return {}

        # Map unit names (with '/' replaced by '-') to private addresses,
        # starting with this unit itself.
        peers = {local_unit().replace('/', '-'): unit_get('private-address')}

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                peers[unit.replace('/', '-')] = relation_get(
                    'private-address', rid=rid, unit=unit)

        if len(peers) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return {'units': peers}

        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
2195 | 391 | |||
2196 | 392 | |||
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """
        Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.')
        rids = relation_ids('image-service')
        if not rids:
            return {}

        # Return as soon as any related unit publishes an API server.
        for rid in rids:
            for unit in related_units(rid):
                server = relation_get('glance-api-server',
                                      rid=rid, unit=unit)
                if server:
                    return {'glance_api_servers': server}

        log('ImageService context is incomplete. '
            'Missing required relation data.')
        return {}
2218 | 414 | |||
2219 | 415 | |||
class ApacheSSLContext(OSContextGenerator):

    """
    Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints.  Generated context
    looks something like:
    {
        'namespace': 'cinder',
        'private_address': 'iscsi.mycinderhost.com',
        'endpoints': [(8776, 8766), (8777, 8767)]
    }

    The endpoints list consists of tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # Charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        # Turn on the apache modules required for SSL reverse proxying.
        check_call(['a2enmod', 'ssl', 'proxy', 'proxy_http'])

    def configure_cert(self):
        # Lay down the (base64-encoded) cert/key pair in a per-service
        # directory under /etc/apache2/ssl.
        if not os.path.isdir('/etc/apache2/ssl'):
            os.mkdir('/etc/apache2/ssl')
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        if not os.path.isdir(ssl_dir):
            os.mkdir(ssl_dir)

        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as fh:
            fh.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as fh:
            fh.write(b64decode(key))

        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as fh:
                fh.write(b64decode(ca_cert))
            check_call(['update-ca-certificates'])

    def __call__(self):
        # Accept a bare string as a single-element port list.
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if not self.external_ports or not https():
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': [],
        }
        if is_clustered():
            ctxt['private_address'] = config('vip')
        for api_port in self.external_ports:
            ctxt['endpoints'].append(
                (int(determine_apache_port(api_port)),
                 int(determine_api_port(api_port))))
        return ctxt
2285 | 481 | |||
2286 | 482 | |||
class NeutronContext(OSContextGenerator):
    """Base context generator for Neutron/Quantum configuration.

    Subclasses are expected to override the ``plugin``,
    ``network_manager`` and ``neutron_security_groups`` properties.
    """
    interfaces = []

    @property
    def plugin(self):
        # Plugin name ('ovs' or 'nvp'); overridden by subclasses.
        return None

    @property
    def network_manager(self):
        # 'quantum' or 'neutron'; overridden by subclasses.
        return None

    @property
    def packages(self):
        # Package groups required by the selected plugin.
        return neutron_plugin_attribute(
            self.plugin, 'packages', self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # Plain loop instead of a list comprehension: this is executed
        # purely for its side effects (package installation).
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        # Record the active plugin so other services can discover it.
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'
        with open(_file, 'wb') as out:
            out.write(self.plugin + '\n')

    def _plugin_ctxt(self, name):
        """Build the per-plugin context shared by ovs_ctxt/nvp_ctxt.

        :param name: value for the 'neutron_plugin' context key.
        """
        return {
            'core_plugin': neutron_plugin_attribute(self.plugin, 'driver',
                                                    self.network_manager),
            'neutron_plugin': name,
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager),
        }

    def ovs_ctxt(self):
        """Context for the Open vSwitch plugin."""
        return self._plugin_ctxt('ovs')

    def nvp_ctxt(self):
        """Context for the Nicira NVP plugin."""
        return self._plugin_ctxt('nvp')

    def neutron_ctxt(self):
        """Common context: network manager name and neutron API URL."""
        proto = 'https' if https() else 'http'
        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')
        return {
            'network_manager': self.network_manager,
            'neutron_url': '%s://%s:%s' % (proto, host, '9696'),
        }

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin == 'nvp':
            ctxt.update(self.nvp_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            ctxt['neutron_alchemy_flags'] = config_flags_parser(alchemy_flags)

        self._save_flag_file()
        return ctxt
2387 | 583 | |||
2388 | 584 | |||
class OSConfigFlagContext(OSContextGenerator):

    """
    Adds user-defined config-flags from charm config to the template
    context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __call__(self):
        user_flags = config('config-flags')
        if not user_flags:
            return {}
        return {'user_config_flags': config_flags_parser(user_flags)}
2407 | 603 | |||
2408 | 604 | |||
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services.  Ie, a subordinate that has interfaces
    to both glance and nova may export the following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]

    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:

        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service: Service name key to query in any subordinate
                        data found.
        :param config_file: Service's config file to query sections.
        :param interface: Subordinate interface to inspect.
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if not sub_config:
                    continue

                # Only catch the parse failure; a bare except would also
                # hide programming errors.
                try:
                    sub_config = json.loads(sub_config)
                except ValueError:
                    log('Could not parse JSON from subordinate_config '
                        'setting from %s' % rid, level=ERROR)
                    continue

                if self.service not in sub_config:
                    log('Found subordinate_config on %s but it contained '
                        'nothing for %s service' % (rid, self.service))
                    continue

                sub_config = sub_config[self.service]
                if self.config_file not in sub_config:
                    log('Found subordinate_config on %s but it contained '
                        'nothing for %s' % (rid, self.config_file))
                    continue

                sub_config = sub_config[self.config_file]
                for k, v in sub_config.iteritems():
                    ctxt[k] = v

        # Guarantee templates always see a 'sections' key.
        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
2496 | 692 | |||
2497 | 693 | |||
class SyslogContext(OSContextGenerator):

    """Exposes the charm's use-syslog config option to templates."""

    def __call__(self):
        return {'use_syslog': config('use-syslog')}
2505 | 701 | 0 | ||
2506 | === removed file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
2507 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-09 20:11:59 +0000 | |||
2508 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000 | |||
2509 | @@ -1,171 +0,0 @@ | |||
2510 | 1 | # Various utilities for dealing with Neutron and the renaming from Quantum. | ||
2511 | 2 | |||
2512 | 3 | from subprocess import check_output | ||
2513 | 4 | |||
2514 | 5 | from charmhelpers.core.hookenv import ( | ||
2515 | 6 | config, | ||
2516 | 7 | log, | ||
2517 | 8 | ERROR, | ||
2518 | 9 | ) | ||
2519 | 10 | |||
2520 | 11 | from charmhelpers.contrib.openstack.utils import os_release | ||
2521 | 12 | |||
2522 | 13 | |||
def headers_package():
    """Return the linux-headers package name matching the running kernel.

    Ensures the correct headers are available for building DKMS packages.
    """
    # check_output returns bytes on Python 3; decode so the formatted
    # package name does not embed a b'...' literal. Harmless on Python 2.
    kver = check_output(['uname', '-r']).decode('utf-8').strip()
    return 'linux-headers-%s' % kver
2528 | 19 | |||
2529 | 20 | QUANTUM_CONF_DIR = '/etc/quantum' | ||
2530 | 21 | |||
2531 | 22 | |||
def kernel_version():
    """Retrieve the current major kernel version as a tuple e.g. (3, 13)."""
    # Decode the bytes check_output returns on Python 3 before splitting
    # on a str separator (harmless on Python 2).
    kver = check_output(['uname', '-r']).decode('utf-8').strip()
    parts = kver.split('.')
    return (int(parts[0]), int(parts[1]))
2537 | 28 | |||
2538 | 29 | |||
def determine_dkms_package():
    """Determine which DKMS package should be used based on kernel version."""
    # NOTE: 3.13 kernels have native support for GRE and VXLAN, so no
    # out-of-tree datapath module is needed.
    return [] if kernel_version() >= (3, 13) else ['openvswitch-datapath-dkms']
2546 | 37 | |||
2547 | 38 | |||
2548 | 39 | # legacy | ||
2549 | 40 | |||
2550 | 41 | |||
def quantum_plugins():
    """Return the plugin definition table for the legacy Quantum naming."""
    from charmhelpers.contrib.openstack import context

    def _db_ctxt():
        # Each plugin entry gets its own SharedDBContext instance.
        return context.SharedDBContext(user=config('neutron-database-user'),
                                       database=config('neutron-database'),
                                       relation_prefix='neutron',
                                       ssl_dir=QUANTUM_CONF_DIR)

    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [_db_ctxt()],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server'],
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [_db_ctxt()],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server'],
        },
    }
2587 | 78 | |||
2588 | 79 | NEUTRON_CONF_DIR = '/etc/neutron' | ||
2589 | 80 | |||
2590 | 81 | |||
def neutron_plugins():
    """Return the plugin definition table for the Neutron naming."""
    from charmhelpers.contrib.openstack import context
    release = os_release('nova-common')

    def _db_ctxt():
        # Each plugin entry gets its own SharedDBContext instance.
        return context.SharedDBContext(user=config('neutron-database-user'),
                                       database=config('neutron-database'),
                                       relation_prefix='neutron',
                                       ssl_dir=NEUTRON_CONF_DIR)

    plugins = {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [_db_ctxt()],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server'],
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [_db_ctxt()],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server'],
        },
    }
    # NOTE: patch in ml2 plugin for icehouse onwards
    if release >= 'icehouse':
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = ['neutron-server',
                                             'neutron-plugin-ml2']
    return plugins
2635 | 126 | |||
2636 | 127 | |||
def neutron_plugin_attribute(plugin, attr, net_manager=None):
    """Look up a single attribute of a plugin definition.

    Returns None when the plugin exists but lacks the attribute.
    """
    manager = net_manager or network_manager()
    loaders = {'quantum': quantum_plugins, 'neutron': neutron_plugins}
    if manager not in loaders:
        log('Error: Network manager does not support plugins.')
        raise Exception
    plugins = loaders[manager]()

    if plugin not in plugins:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception

    return plugins[plugin].get(attr)
2657 | 148 | |||
2658 | 149 | |||
def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ('quantum', 'neutron'):
        return manager

    if release == 'essex':
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    if release in ('folsom', 'grizzly'):
        # neutron is named quantum in F and G
        return 'quantum'
    # ensure accurate naming for all releases post-H
    return 'neutron'
2681 | 172 | 0 | ||
2682 | === removed directory 'hooks/charmhelpers/contrib/openstack/templates' | |||
2683 | === removed file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py' | |||
2684 | --- hooks/charmhelpers/contrib/openstack/templates/__init__.py 2013-11-26 17:12:54 +0000 | |||
2685 | +++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000 | |||
2686 | @@ -1,2 +0,0 @@ | |||
2687 | 1 | # dummy __init__.py to fool syncer into thinking this is a syncable python | ||
2688 | 2 | # module | ||
2689 | 3 | 0 | ||
2690 | === removed file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
2691 | --- hooks/charmhelpers/contrib/openstack/templating.py 2013-11-26 17:12:54 +0000 | |||
2692 | +++ hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000 | |||
2693 | @@ -1,280 +0,0 @@ | |||
2694 | 1 | import os | ||
2695 | 2 | |||
2696 | 3 | from charmhelpers.fetch import apt_install | ||
2697 | 4 | |||
2698 | 5 | from charmhelpers.core.hookenv import ( | ||
2699 | 6 | log, | ||
2700 | 7 | ERROR, | ||
2701 | 8 | INFO | ||
2702 | 9 | ) | ||
2703 | 10 | |||
2704 | 11 | from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES | ||
2705 | 12 | |||
2706 | 13 | try: | ||
2707 | 14 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | ||
2708 | 15 | except ImportError: | ||
2709 | 16 | # python-jinja2 may not be installed yet, or we're running unittests. | ||
2710 | 17 | FileSystemLoader = ChoiceLoader = Environment = exceptions = None | ||
2711 | 18 | |||
2712 | 19 | |||
class OSConfigException(Exception):
    """Raised by the templating helpers, e.g. by get_loader when the
    base templates directory does not exist."""
    pass
2715 | 22 | |||
2716 | 23 | |||
def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release. If a release-specific template directory
    is missing under templates_dir, it will be omitted from the loader.
    templates_dir is added to the bottom of the search list as a base
    loading dir.

    A charm may also ship a templates dir with this module
    and it will be appended to the bottom of the search list, eg:
    hooks/charmhelpers/contrib/openstack/templates.

    :param templates_dir: str: Base template directory containing release
                          sub-directories.
    :param os_release   : str: OpenStack release codename to construct template
                          loader.

    :returns            : jinja2.ChoiceLoader constructed with a list of
                          jinja2.FilesystemLoaders, ordered in descending
                          order by OpenStack release.
    """
    # (codename, candidate directory) pairs for every known release.
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in OPENSTACK_CODENAMES.itervalues()]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # the bottom contains templates_dir and possibly a common templates dir
    # shipped with the helper.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    # Release dirs are inserted at the front so more specific releases are
    # searched first; stop once the requested release has been added.
    for rel, tmpl_dir in tmpl_dirs:
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)
2761 | 68 | |||
2762 | 69 | |||
class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        """
        :param config_file: path of the config file rendered from this
                            template.
        :param contexts: a single context generator callable, or a list
                         of them.
        """
        self.config_file = config_file

        # Accept a bare callable as a one-element list.
        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        # Interfaces of every context that has produced data so far.
        self._complete_contexts = []

    def context(self):
        """Merge the output of all registered context generators."""
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if _ctxt:
                ctxt.update(_ctxt)
                # track interfaces for every complete context.
                # (A plain loop: the original abused a list comprehension
                # purely for its side effects.)
                for interface in context.interfaces:
                    if interface not in self._complete_contexts:
                        self._complete_contexts.append(interface)
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts
2798 | 105 | |||
2799 | 106 | |||
class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms.  It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple
    OpenStack releases.

    Basic usage:
        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    Details:

    OpenStack Releases and template loading
    ---------------------------------------
    When the object is instantiated, it is associated with a specific OS
    release.  This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
        - from the most recent OS release-specific template dir (if one exists)
        - the base templates_dir
        - a template directory shipped in the charm with this helper file.

    For the example above, '/tmp/templates' contains the following structure:
        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
    us to ship common templates (haproxy, apache) with the helpers.

    Context generators
    ---------------------------------------
    Context generators are used to generate template contexts during hook
    execution.  Doing so may require inspecting service relations, charm
    config, etc.  When registered, a config file is associated with a list
    of generators.  When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template.  See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        """
        :param templates_dir: str: existing base directory of templates.
        :param openstack_release: str: release codename used to select
                                       release-specific template dirs.
        :raises OSConfigException: if templates_dir does not exist.
        """
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        # Lazily build the jinja2 environment; the loader searches
        # release-specific dirs before the base templates dir.
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        # Resolve a template name through the release-aware loader.
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        """
        Render a registered config file to a string.

        :raises OSConfigException: if config_file was never registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        _out = self.render(config_file)

        with open(config_file, 'wb') as out:
            out.write(_out)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        # Plain loop instead of the previous side-effect list comprehension.
        for config_file in self.templates:
            self.write(config_file)

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        for template in self.templates.itervalues():
            interfaces.extend(template.complete_contexts())
        return interfaces
2974 | 281 | 0 | ||
2975 | === removed file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
2976 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-09 20:11:59 +0000 | |||
2977 | +++ hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000 | |||
2978 | @@ -1,450 +0,0 @@ | |||
2979 | 1 | #!/usr/bin/python | ||
2980 | 2 | |||
2981 | 3 | # Common python helper functions used for OpenStack charms. | ||
2982 | 4 | from collections import OrderedDict | ||
2983 | 5 | |||
2984 | 6 | import apt_pkg as apt | ||
2985 | 7 | import subprocess | ||
2986 | 8 | import os | ||
2987 | 9 | import socket | ||
2988 | 10 | import sys | ||
2989 | 11 | |||
2990 | 12 | from charmhelpers.core.hookenv import ( | ||
2991 | 13 | config, | ||
2992 | 14 | log as juju_log, | ||
2993 | 15 | charm_dir, | ||
2994 | 16 | ERROR, | ||
2995 | 17 | INFO | ||
2996 | 18 | ) | ||
2997 | 19 | |||
2998 | 20 | from charmhelpers.contrib.storage.linux.lvm import ( | ||
2999 | 21 | deactivate_lvm_volume_group, | ||
3000 | 22 | is_lvm_physical_volume, | ||
3001 | 23 | remove_lvm_physical_volume, | ||
3002 | 24 | ) | ||
3003 | 25 | |||
3004 | 26 | from charmhelpers.core.host import lsb_release, mounts, umount | ||
3005 | 27 | from charmhelpers.fetch import apt_install | ||
3006 | 28 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | ||
3007 | 29 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | ||
3008 | 30 | |||
3009 | 31 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" | ||
3010 | 32 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' | ||
3011 | 33 | |||
3012 | 34 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | ||
3013 | 35 | 'restricted main multiverse universe') | ||
3014 | 36 | |||
3015 | 37 | |||
3016 | 38 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | ||
3017 | 39 | ('oneiric', 'diablo'), | ||
3018 | 40 | ('precise', 'essex'), | ||
3019 | 41 | ('quantal', 'folsom'), | ||
3020 | 42 | ('raring', 'grizzly'), | ||
3021 | 43 | ('saucy', 'havana'), | ||
3022 | 44 | ('trusty', 'icehouse') | ||
3023 | 45 | ]) | ||
3024 | 46 | |||
3025 | 47 | |||
3026 | 48 | OPENSTACK_CODENAMES = OrderedDict([ | ||
3027 | 49 | ('2011.2', 'diablo'), | ||
3028 | 50 | ('2012.1', 'essex'), | ||
3029 | 51 | ('2012.2', 'folsom'), | ||
3030 | 52 | ('2013.1', 'grizzly'), | ||
3031 | 53 | ('2013.2', 'havana'), | ||
3032 | 54 | ('2014.1', 'icehouse'), | ||
3033 | 55 | ]) | ||
3034 | 56 | |||
3035 | 57 | # The ugly duckling | ||
3036 | 58 | SWIFT_CODENAMES = OrderedDict([ | ||
3037 | 59 | ('1.4.3', 'diablo'), | ||
3038 | 60 | ('1.4.8', 'essex'), | ||
3039 | 61 | ('1.7.4', 'folsom'), | ||
3040 | 62 | ('1.8.0', 'grizzly'), | ||
3041 | 63 | ('1.7.7', 'grizzly'), | ||
3042 | 64 | ('1.7.6', 'grizzly'), | ||
3043 | 65 | ('1.10.0', 'havana'), | ||
3044 | 66 | ('1.9.1', 'havana'), | ||
3045 | 67 | ('1.9.0', 'havana'), | ||
3046 | 68 | ('1.13.1', 'icehouse'), | ||
3047 | 69 | ('1.13.0', 'icehouse'), | ||
3048 | 70 | ('1.12.0', 'icehouse'), | ||
3049 | 71 | ('1.11.0', 'icehouse'), | ||
3050 | 72 | ]) | ||
3051 | 73 | |||
3052 | 74 | DEFAULT_LOOPBACK_SIZE = '5G' | ||
3053 | 75 | |||
3054 | 76 | |||
def error_out(msg):
    """Log *msg* as a fatal error and abort the hook with exit status 1."""
    juju_log("FATAL ERROR: {}".format(msg), level='ERROR')
    sys.exit(1)
3058 | 80 | |||
3059 | 81 | |||
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.

    :param src: str: configured source: 'distro', 'distro-proposed',
                     'cloud:<ubuntu>-<release>...', a 'deb ...' line or a
                     'ppa:' URL.
    :returns: str or None: release codename; None when a deb/ppa source
              matches no known codename.
    '''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    rel = ''
    if src in ['distro', 'distro-proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)
        return rel

    if src.startswith('cloud:'):
        # 'cloud:precise-folsom/updates' -> 'folsom'
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        # Only the codenames are needed; the previous iteritems() loop
        # discarded the version keys.
        for codename in OPENSTACK_CODENAMES.itervalues():
            if codename in src:
                return codename
    # No match found for a deb/ppa source (explicit, was implicit before).
    return None
3083 | 105 | |||
3084 | 106 | |||
def get_os_version_install_source(src):
    """Map an installation source to its OpenStack version number."""
    return get_os_version_codename(get_os_codename_install_source(src))
3088 | 110 | |||
3089 | 111 | |||
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    if vers not in OPENSTACK_CODENAMES:
        # Unknown version is fatal, same as the original KeyError path.
        error_out('Could not determine OpenStack codename for version %s'
                  % vers)
    return OPENSTACK_CODENAMES[vers]
3097 | 119 | |||
3098 | 120 | |||
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for version, release in OPENSTACK_CODENAMES.iteritems():
        if release == codename:
            return version
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
3107 | 129 | |||
3108 | 130 | |||
def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.

    :param package: str: name of an installed package.
    :param fatal: bool: when True, exit the hook on failure instead of
                        returning None.
    :returns: str or None: release codename.
    '''
    apt.init()
    cache = apt.Cache()

    try:
        pkg = cache[package]
    except KeyError:
        # Narrowed from a bare 'except:': a missing package raises KeyError;
        # anything else should propagate rather than be swallowed.
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation '\
            'candidate: %s' % package
        error_out(e)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # swift versions independently of the rest of OpenStack.
            swift_vers = vers[:5]
            if swift_vers not in SWIFT_CODENAMES:
                # Deal with 1.10.0 upward
                swift_vers = vers[:6]
            return SWIFT_CODENAMES[swift_vers]
        else:
            vers = vers[:6]
            return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
3146 | 168 | |||
3147 | 169 | |||
def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.'''
    codename = get_os_codename_package(pkg, fatal=fatal)
    if not codename:
        return None

    # swift versions are tracked in their own table.
    vers_map = SWIFT_CODENAMES if 'swift' in pkg else OPENSTACK_CODENAMES

    for version, release in vers_map.iteritems():
        if release == codename:
            return version
    # No matching version found: fall through and return None.
3165 | 187 | |||
3166 | 188 | |||
# Module-level cache for os_release().
os_rel = None


def os_release(package, base='essex'):
    '''
    Returns OpenStack release codename from a cached global.
    If the codename can not be determined from either an installed package or
    the installation source, the earliest release supported by the charm
    (*base*) is returned.
    '''
    global os_rel
    if not os_rel:
        os_rel = (get_os_codename_package(package, fatal=False) or
                  get_os_codename_install_source(config('openstack-origin')) or
                  base)
    return os_rel
3184 | 206 | |||
3185 | 207 | |||
def import_key(keyid):
    """Import a repository signing key from the Ubuntu keyserver."""
    # Same argv the original produced via str.split(' ').
    cmd = ['apt-key', 'adv',
           '--keyserver', 'hkp://keyserver.ubuntu.com:80',
           '--recv-keys', keyid]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
3193 | 215 | |||
3194 | 216 | |||
def configure_installation_source(rel):
    '''Configure apt installation source.

    :param rel: str: one of 'distro', 'distro-proposed', 'ppa:...',
                     a 'deb ...' line (optionally suffixed '|<keyid>'),
                     or 'cloud:<ubuntu>-<openstack-release>'.
    Exits the hook (via error_out) on an unrecognised source.
    '''
    if rel == 'distro':
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # 'deb ...|<keyid>' optionally carries a signing key to import.
        # (renamed from the ambiguous single-letter 'l')
        parts = rel.split('|')
        if len(parts) == 2:
            src, key = parts
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif len(parts) == 1:
            src = rel
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            # Renamed from 'os_rel' so the module-level cache used by
            # os_release() is no longer shadowed here.
            staging_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % staging_rel
            subprocess.check_call(['add-apt-repository', '-y', ppa])
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
3264 | 286 | |||
3265 | 287 | |||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wb') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop instead of the previous side-effect list comprehension.
        for name, value in env_vars.iteritems():
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
3282 | 304 | |||
3283 | 305 | |||
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.
    :returns: bool: True if the configured installation source offers
              a newer version of package.
    """
    configured_src = config('openstack-origin')
    installed_vers = get_os_version_package(package)
    offered_vers = get_os_version_install_source(configured_src)
    apt.init()
    return apt.version_compare(offered_vers, installed_vers) == 1
3301 | 323 | |||
3302 | 324 | |||
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if block_device in _none:
        # FIX: error_out() takes only a message -- the previous
        # 'level=ERROR' kwarg raised TypeError on this error path.
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # '/path|5G' requests a loopback device of the given size.
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        # FIX: same invalid 'level=ERROR' kwarg removed here.
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
3334 | 356 | |||
3335 | 357 | |||
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount anywhere the device is currently mounted.
    for mp, d in mounts():
        if d != block_device:
            continue
        juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                 (d, mp), level=INFO)
        umount(mp, persist=True)

    if not is_lvm_physical_volume(block_device):
        zap_disk(block_device)
    else:
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
3357 | 379 | |||
3358 | 380 | |||
def is_ip(address):
    """
    Returns True if address is a valid (IPv4) IP address.
    """
    try:
        # inet_aton only accepts IPv4 dotted notation.
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
3369 | 391 | |||
3370 | 392 | |||
def ns_query(address):
    """Perform a DNS query for *address*.

    PTR lookup for dns.name.Name objects, A lookup for strings; returns
    the first answer as a string, or None for other input types or no
    answers.
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, basestring):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    return str(answers[0]) if answers else None
3389 | 411 | |||
3390 | 412 | |||
def get_host_ip(hostname):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.
    """
    return hostname if is_ip(hostname) else ns_query(hostname)
3400 | 422 | |||
3401 | 423 | |||
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.  With fqdn=True a trailing dot is
    stripped; otherwise only the first label is returned.
    """
    result = address
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install('python-dnspython')
            import dns.reversename

        reverse_name = dns.reversename.from_address(address)
        result = ns_query(reverse_name)
        if not result:
            return None

    if not fqdn:
        return result.split('.')[0]
    # strip trailing .
    return result[:-1] if result.endswith('.') else result
3429 | 451 | 0 | ||
3430 | === removed directory 'hooks/charmhelpers/contrib/peerstorage' | |||
3431 | === removed file 'hooks/charmhelpers/contrib/peerstorage/__init__.py' | |||
3432 | --- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-05-09 20:11:59 +0000 | |||
3433 | +++ hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000 | |||
3434 | @@ -1,83 +0,0 @@ | |||
3435 | 1 | from charmhelpers.core.hookenv import ( | ||
3436 | 2 | relation_ids, | ||
3437 | 3 | relation_get, | ||
3438 | 4 | local_unit, | ||
3439 | 5 | relation_set, | ||
3440 | 6 | ) | ||
3441 | 7 | |||
3442 | 8 | """ | ||
3443 | 9 | This helper provides functions to support use of a peer relation | ||
3444 | 10 | for basic key/value storage, with the added benefit that all storage | ||
3445 | 11 | can be replicated across peer units, so this is really useful for | ||
3446 | 12 | services that issue usernames/passwords to remote services. | ||
3447 | 13 | |||
3448 | 14 | def shared_db_changed() | ||
3449 | 15 | # Only the lead unit should create passwords | ||
3450 | 16 | if not is_leader(): | ||
3451 | 17 | return | ||
3452 | 18 | username = relation_get('username') | ||
3453 | 19 | key = '{}.password'.format(username) | ||
3454 | 20 | # Attempt to retrieve any existing password for this user | ||
3455 | 21 | password = peer_retrieve(key) | ||
3456 | 22 | if password is None: | ||
3457 | 23 | # New user, create password and store | ||
3458 | 24 | password = pwgen(length=64) | ||
3459 | 25 | peer_store(key, password) | ||
3460 | 26 | create_access(username, password) | ||
3461 | 27 | relation_set(password=password) | ||
3462 | 28 | |||
3463 | 29 | |||
3464 | 30 | def cluster_changed() | ||
3465 | 31 | # Echo any relation data other that *-address | ||
3466 | 32 | # back onto the peer relation so all units have | ||
3467 | 33 | # all *.password keys stored on their local relation | ||
3468 | 34 | # for later retrieval. | ||
3469 | 35 | peer_echo() | ||
3470 | 36 | |||
3471 | 37 | """ | ||
3472 | 38 | |||
3473 | 39 | |||
def peer_retrieve(key, relation_name='cluster'):
    """ Retrieve a named key from peer relation relation_name """
    cluster_rels = relation_ids(relation_name)
    if len(cluster_rels) > 0:
        cluster_rid = cluster_rels[0]
        return relation_get(attribute=key, rid=cluster_rid,
                            unit=local_unit())
    else:
        # FIX: implicit string concatenation was missing the separating
        # space, producing 'Unable to detectpeer relation ...'; now matches
        # peer_store()'s wording.
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
3484 | 50 | |||
3485 | 51 | |||
def peer_store(key, value, relation_name='cluster'):
    """ Store the key/value pair on the named peer relation relation_name """
    rids = relation_ids(relation_name)
    if not rids:
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    # Settings stored on the first peer relation are replicated to all units.
    relation_set(relation_id=rids[0],
                 relation_settings={key: value})
3496 | 62 | |||
3497 | 63 | |||
def peer_echo(includes=None):
    """Echo filtered attributes back onto the same relation for storage

    Note that this helper must only be called within a peer relation
    changed hook
    """
    rdata = relation_get()
    if includes is None:
        # Echo everything except the per-unit address keys.
        echo_data = rdata.copy()
        for excluded in ('private-address', 'public-address'):
            echo_data.pop(excluded, None)
    else:
        # Echo only attributes containing one of the requested substrings.
        echo_data = {}
        for attribute, value in rdata.iteritems():
            if any(include in attribute for include in includes):
                echo_data[attribute] = value
    if echo_data:
        relation_set(relation_settings=echo_data)
3518 | 84 | 0 | ||
3519 | === removed directory 'hooks/charmhelpers/contrib/python' | |||
3520 | === removed file 'hooks/charmhelpers/contrib/python/__init__.py' | |||
3521 | === removed file 'hooks/charmhelpers/contrib/python/packages.py' | |||
3522 | --- hooks/charmhelpers/contrib/python/packages.py 2014-05-09 20:11:59 +0000 | |||
3523 | +++ hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000 | |||
3524 | @@ -1,76 +0,0 @@ | |||
3525 | 1 | #!/usr/bin/env python | ||
3526 | 2 | # coding: utf-8 | ||
3527 | 3 | |||
3528 | 4 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | ||
3529 | 5 | |||
3530 | 6 | from charmhelpers.fetch import apt_install | ||
3531 | 7 | from charmhelpers.core.hookenv import log | ||
3532 | 8 | |||
3533 | 9 | try: | ||
3534 | 10 | from pip import main as pip_execute | ||
3535 | 11 | except ImportError: | ||
3536 | 12 | apt_install('python-pip') | ||
3537 | 13 | from pip import main as pip_execute | ||
3538 | 14 | |||
3539 | 15 | |||
def parse_options(given, available):
    """Yield '--key=value' for each given option that is in available."""
    for name in given:
        if name in available:
            yield "--{0}={1}".format(name, given[name])
3545 | 21 | |||
3546 | 22 | |||
3547 | 23 | def pip_install_requirements(requirements, **options): | ||
3548 | 24 | """Install a requirements file """ | ||
3549 | 25 | command = ["install"] | ||
3550 | 26 | |||
3551 | 27 | available_options = ('proxy', 'src', 'log', ) | ||
3552 | 28 | for option in parse_options(options, available_options): | ||
3553 | 29 | command.append(option) | ||
3554 | 30 | |||
3555 | 31 | command.append("-r {0}".format(requirements)) | ||
3556 | 32 | log("Installing from file: {} with options: {}".format(requirements, | ||
3557 | 33 | command)) | ||
3558 | 34 | pip_execute(command) | ||
3559 | 35 | |||
3560 | 36 | |||
3561 | 37 | def pip_install(package, fatal=False, **options): | ||
3562 | 38 | """Install a python package""" | ||
3563 | 39 | command = ["install"] | ||
3564 | 40 | |||
3565 | 41 | available_options = ('proxy', 'src', 'log', "index-url", ) | ||
3566 | 42 | for option in parse_options(options, available_options): | ||
3567 | 43 | command.append(option) | ||
3568 | 44 | |||
3569 | 45 | if isinstance(package, list): | ||
3570 | 46 | command.extend(package) | ||
3571 | 47 | else: | ||
3572 | 48 | command.append(package) | ||
3573 | 49 | |||
3574 | 50 | log("Installing {} package with options: {}".format(package, | ||
3575 | 51 | command)) | ||
3576 | 52 | pip_execute(command) | ||
3577 | 53 | |||
3578 | 54 | |||
3579 | 55 | def pip_uninstall(package, **options): | ||
3580 | 56 | """Uninstall a python package""" | ||
3581 | 57 | command = ["uninstall", "-q", "-y"] | ||
3582 | 58 | |||
3583 | 59 | available_options = ('proxy', 'log', ) | ||
3584 | 60 | for option in parse_options(options, available_options): | ||
3585 | 61 | command.append(option) | ||
3586 | 62 | |||
3587 | 63 | if isinstance(package, list): | ||
3588 | 64 | command.extend(package) | ||
3589 | 65 | else: | ||
3590 | 66 | command.append(package) | ||
3591 | 67 | |||
3592 | 68 | log("Uninstalling {} package with options: {}".format(package, | ||
3593 | 69 | command)) | ||
3594 | 70 | pip_execute(command) | ||
3595 | 71 | |||
3596 | 72 | |||
3597 | 73 | def pip_list(): | ||
3598 | 74 | """Returns the list of current python installed packages | ||
3599 | 75 | """ | ||
3600 | 76 | return pip_execute(["list"]) | ||
3601 | 77 | 0 | ||
3602 | === removed file 'hooks/charmhelpers/contrib/python/version.py' | |||
3603 | --- hooks/charmhelpers/contrib/python/version.py 2014-05-09 20:11:59 +0000 | |||
3604 | +++ hooks/charmhelpers/contrib/python/version.py 1970-01-01 00:00:00 +0000 | |||
3605 | @@ -1,18 +0,0 @@ | |||
3606 | 1 | #!/usr/bin/env python | ||
3607 | 2 | # coding: utf-8 | ||
3608 | 3 | |||
3609 | 4 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | ||
3610 | 5 | |||
3611 | 6 | import sys | ||
3612 | 7 | |||
3613 | 8 | |||
3614 | 9 | def current_version(): | ||
3615 | 10 | """Current system python version""" | ||
3616 | 11 | return sys.version_info | ||
3617 | 12 | |||
3618 | 13 | |||
3619 | 14 | def current_version_string(): | ||
3620 | 15 | """Current system python version as string major.minor.micro""" | ||
3621 | 16 | return "{0}.{1}.{2}".format(sys.version_info.major, | ||
3622 | 17 | sys.version_info.minor, | ||
3623 | 18 | sys.version_info.micro) | ||
3624 | 19 | 0 | ||
3625 | === removed directory 'hooks/charmhelpers/contrib/saltstack' | |||
3626 | === removed file 'hooks/charmhelpers/contrib/saltstack/__init__.py' | |||
3627 | --- hooks/charmhelpers/contrib/saltstack/__init__.py 2013-11-26 17:12:54 +0000 | |||
3628 | +++ hooks/charmhelpers/contrib/saltstack/__init__.py 1970-01-01 00:00:00 +0000 | |||
3629 | @@ -1,102 +0,0 @@ | |||
3630 | 1 | """Charm Helpers saltstack - declare the state of your machines. | ||
3631 | 2 | |||
3632 | 3 | This helper enables you to declare your machine state, rather than | ||
3633 | 4 | program it procedurally (and have to test each change to your procedures). | ||
3634 | 5 | Your install hook can be as simple as: | ||
3635 | 6 | |||
3636 | 7 | {{{ | ||
3637 | 8 | from charmhelpers.contrib.saltstack import ( | ||
3638 | 9 | install_salt_support, | ||
3639 | 10 | update_machine_state, | ||
3640 | 11 | ) | ||
3641 | 12 | |||
3642 | 13 | |||
3643 | 14 | def install(): | ||
3644 | 15 | install_salt_support() | ||
3645 | 16 | update_machine_state('machine_states/dependencies.yaml') | ||
3646 | 17 | update_machine_state('machine_states/installed.yaml') | ||
3647 | 18 | }}} | ||
3648 | 19 | |||
3649 | 20 | and won't need to change (nor will its tests) when you change the machine | ||
3650 | 21 | state. | ||
3651 | 22 | |||
3652 | 23 | It's using a python package called salt-minion which allows various formats for | ||
3653 | 24 | specifying resources, such as: | ||
3654 | 25 | |||
3655 | 26 | {{{ | ||
3656 | 27 | /srv/{{ basedir }}: | ||
3657 | 28 | file.directory: | ||
3658 | 29 | - group: ubunet | ||
3659 | 30 | - user: ubunet | ||
3660 | 31 | - require: | ||
3661 | 32 | - user: ubunet | ||
3662 | 33 | - recurse: | ||
3663 | 34 | - user | ||
3664 | 35 | - group | ||
3665 | 36 | |||
3666 | 37 | ubunet: | ||
3667 | 38 | group.present: | ||
3668 | 39 | - gid: 1500 | ||
3669 | 40 | user.present: | ||
3670 | 41 | - uid: 1500 | ||
3671 | 42 | - gid: 1500 | ||
3672 | 43 | - createhome: False | ||
3673 | 44 | - require: | ||
3674 | 45 | - group: ubunet | ||
3675 | 46 | }}} | ||
3676 | 47 | |||
3677 | 48 | The docs for all the different state definitions are at: | ||
3678 | 49 | http://docs.saltstack.com/ref/states/all/ | ||
3679 | 50 | |||
3680 | 51 | |||
3681 | 52 | TODO: | ||
3682 | 53 | * Add test helpers which will ensure that machine state definitions | ||
3683 | 54 | are functionally (but not necessarily logically) correct (ie. getting | ||
3684 | 55 | salt to parse all state defs. | ||
3685 | 56 | * Add a link to a public bootstrap charm example / blogpost. | ||
3686 | 57 | * Find a way to obviate the need to use the grains['charm_dir'] syntax | ||
3687 | 58 | in templates. | ||
3688 | 59 | """ | ||
3689 | 60 | # Copyright 2013 Canonical Ltd. | ||
3690 | 61 | # | ||
3691 | 62 | # Authors: | ||
3692 | 63 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
3693 | 64 | import subprocess | ||
3694 | 65 | |||
3695 | 66 | import charmhelpers.contrib.templating.contexts | ||
3696 | 67 | import charmhelpers.core.host | ||
3697 | 68 | import charmhelpers.core.hookenv | ||
3698 | 69 | |||
3699 | 70 | |||
3700 | 71 | salt_grains_path = '/etc/salt/grains' | ||
3701 | 72 | |||
3702 | 73 | |||
3703 | 74 | def install_salt_support(from_ppa=True): | ||
3704 | 75 | """Installs the salt-minion helper for machine state. | ||
3705 | 76 | |||
3706 | 77 | By default the salt-minion package is installed from | ||
3707 | 78 | the saltstack PPA. If from_ppa is False you must ensure | ||
3708 | 79 | that the salt-minion package is available in the apt cache. | ||
3709 | 80 | """ | ||
3710 | 81 | if from_ppa: | ||
3711 | 82 | subprocess.check_call([ | ||
3712 | 83 | '/usr/bin/add-apt-repository', | ||
3713 | 84 | '--yes', | ||
3714 | 85 | 'ppa:saltstack/salt', | ||
3715 | 86 | ]) | ||
3716 | 87 | subprocess.check_call(['/usr/bin/apt-get', 'update']) | ||
3717 | 88 | # We install salt-common as salt-minion would run the salt-minion | ||
3718 | 89 | # daemon. | ||
3719 | 90 | charmhelpers.fetch.apt_install('salt-common') | ||
3720 | 91 | |||
3721 | 92 | |||
3722 | 93 | def update_machine_state(state_path): | ||
3723 | 94 | """Update the machine state using the provided state declaration.""" | ||
3724 | 95 | charmhelpers.contrib.templating.contexts.juju_state_to_yaml( | ||
3725 | 96 | salt_grains_path) | ||
3726 | 97 | subprocess.check_call([ | ||
3727 | 98 | 'salt-call', | ||
3728 | 99 | '--local', | ||
3729 | 100 | 'state.template', | ||
3730 | 101 | state_path, | ||
3731 | 102 | ]) | ||
3732 | 103 | 0 | ||
3733 | === removed directory 'hooks/charmhelpers/contrib/ssl' | |||
3734 | === removed file 'hooks/charmhelpers/contrib/ssl/__init__.py' | |||
3735 | --- hooks/charmhelpers/contrib/ssl/__init__.py 2013-11-26 17:12:54 +0000 | |||
3736 | +++ hooks/charmhelpers/contrib/ssl/__init__.py 1970-01-01 00:00:00 +0000 | |||
3737 | @@ -1,78 +0,0 @@ | |||
3738 | 1 | import subprocess | ||
3739 | 2 | from charmhelpers.core import hookenv | ||
3740 | 3 | |||
3741 | 4 | |||
3742 | 5 | def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None): | ||
3743 | 6 | """Generate selfsigned SSL keypair | ||
3744 | 7 | |||
3745 | 8 | You must provide one of the 3 optional arguments: | ||
3746 | 9 | config, subject or cn | ||
3747 | 10 | If more than one is provided the leftmost will be used | ||
3748 | 11 | |||
3749 | 12 | Arguments: | ||
3750 | 13 | keyfile -- (required) full path to the keyfile to be created | ||
3751 | 14 | certfile -- (required) full path to the certfile to be created | ||
3752 | 15 | keysize -- (optional) SSL key length | ||
3753 | 16 | config -- (optional) openssl configuration file | ||
3754 | 17 | subject -- (optional) dictionary with SSL subject variables | ||
3755 | 18 | cn -- (optional) cerfificate common name | ||
3756 | 19 | |||
3757 | 20 | Required keys in subject dict: | ||
3758 | 21 | cn -- Common name (eq. FQDN) | ||
3759 | 22 | |||
3760 | 23 | Optional keys in subject dict | ||
3761 | 24 | country -- Country Name (2 letter code) | ||
3762 | 25 | state -- State or Province Name (full name) | ||
3763 | 26 | locality -- Locality Name (eg, city) | ||
3764 | 27 | organization -- Organization Name (eg, company) | ||
3765 | 28 | organizational_unit -- Organizational Unit Name (eg, section) | ||
3766 | 29 | email -- Email Address | ||
3767 | 30 | """ | ||
3768 | 31 | |||
3769 | 32 | cmd = [] | ||
3770 | 33 | if config: | ||
3771 | 34 | cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", | ||
3772 | 35 | "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", | ||
3773 | 36 | "-keyout", keyfile, | ||
3774 | 37 | "-out", certfile, "-config", config] | ||
3775 | 38 | elif subject: | ||
3776 | 39 | ssl_subject = "" | ||
3777 | 40 | if "country" in subject: | ||
3778 | 41 | ssl_subject = ssl_subject + "/C={}".format(subject["country"]) | ||
3779 | 42 | if "state" in subject: | ||
3780 | 43 | ssl_subject = ssl_subject + "/ST={}".format(subject["state"]) | ||
3781 | 44 | if "locality" in subject: | ||
3782 | 45 | ssl_subject = ssl_subject + "/L={}".format(subject["locality"]) | ||
3783 | 46 | if "organization" in subject: | ||
3784 | 47 | ssl_subject = ssl_subject + "/O={}".format(subject["organization"]) | ||
3785 | 48 | if "organizational_unit" in subject: | ||
3786 | 49 | ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"]) | ||
3787 | 50 | if "cn" in subject: | ||
3788 | 51 | ssl_subject = ssl_subject + "/CN={}".format(subject["cn"]) | ||
3789 | 52 | else: | ||
3790 | 53 | hookenv.log("When using \"subject\" argument you must " | ||
3791 | 54 | "provide \"cn\" field at very least") | ||
3792 | 55 | return False | ||
3793 | 56 | if "email" in subject: | ||
3794 | 57 | ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"]) | ||
3795 | 58 | |||
3796 | 59 | cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", | ||
3797 | 60 | "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", | ||
3798 | 61 | "-keyout", keyfile, | ||
3799 | 62 | "-out", certfile, "-subj", ssl_subject] | ||
3800 | 63 | elif cn: | ||
3801 | 64 | cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", | ||
3802 | 65 | "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", | ||
3803 | 66 | "-keyout", keyfile, | ||
3804 | 67 | "-out", certfile, "-subj", "/CN={}".format(cn)] | ||
3805 | 68 | |||
3806 | 69 | if not cmd: | ||
3807 | 70 | hookenv.log("No config, subject or cn provided," | ||
3808 | 71 | "unable to generate self signed SSL certificates") | ||
3809 | 72 | return False | ||
3810 | 73 | try: | ||
3811 | 74 | subprocess.check_call(cmd) | ||
3812 | 75 | return True | ||
3813 | 76 | except Exception as e: | ||
3814 | 77 | print "Execution of openssl command failed:\n{}".format(e) | ||
3815 | 78 | return False | ||
3816 | 79 | 0 | ||
3817 | === removed file 'hooks/charmhelpers/contrib/ssl/service.py' | |||
3818 | --- hooks/charmhelpers/contrib/ssl/service.py 2014-05-09 20:11:59 +0000 | |||
3819 | +++ hooks/charmhelpers/contrib/ssl/service.py 1970-01-01 00:00:00 +0000 | |||
3820 | @@ -1,267 +0,0 @@ | |||
3821 | 1 | import logging | ||
3822 | 2 | import os | ||
3823 | 3 | from os.path import join as path_join | ||
3824 | 4 | from os.path import exists | ||
3825 | 5 | import subprocess | ||
3826 | 6 | |||
3827 | 7 | |||
3828 | 8 | log = logging.getLogger("service_ca") | ||
3829 | 9 | |||
3830 | 10 | logging.basicConfig(level=logging.DEBUG) | ||
3831 | 11 | |||
3832 | 12 | STD_CERT = "standard" | ||
3833 | 13 | |||
3834 | 14 | # Mysql server is fairly picky about cert creation | ||
3835 | 15 | # and types, spec its creation separately for now. | ||
3836 | 16 | MYSQL_CERT = "mysql" | ||
3837 | 17 | |||
3838 | 18 | |||
3839 | 19 | class ServiceCA(object): | ||
3840 | 20 | |||
3841 | 21 | default_expiry = str(365 * 2) | ||
3842 | 22 | default_ca_expiry = str(365 * 6) | ||
3843 | 23 | |||
3844 | 24 | def __init__(self, name, ca_dir, cert_type=STD_CERT): | ||
3845 | 25 | self.name = name | ||
3846 | 26 | self.ca_dir = ca_dir | ||
3847 | 27 | self.cert_type = cert_type | ||
3848 | 28 | |||
3849 | 29 | ############### | ||
3850 | 30 | # Hook Helper API | ||
3851 | 31 | @staticmethod | ||
3852 | 32 | def get_ca(type=STD_CERT): | ||
3853 | 33 | service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] | ||
3854 | 34 | ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca') | ||
3855 | 35 | ca = ServiceCA(service_name, ca_path, type) | ||
3856 | 36 | ca.init() | ||
3857 | 37 | return ca | ||
3858 | 38 | |||
3859 | 39 | @classmethod | ||
3860 | 40 | def get_service_cert(cls, type=STD_CERT): | ||
3861 | 41 | service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] | ||
3862 | 42 | ca = cls.get_ca() | ||
3863 | 43 | crt, key = ca.get_or_create_cert(service_name) | ||
3864 | 44 | return crt, key, ca.get_ca_bundle() | ||
3865 | 45 | |||
3866 | 46 | ############### | ||
3867 | 47 | |||
3868 | 48 | def init(self): | ||
3869 | 49 | log.debug("initializing service ca") | ||
3870 | 50 | if not exists(self.ca_dir): | ||
3871 | 51 | self._init_ca_dir(self.ca_dir) | ||
3872 | 52 | self._init_ca() | ||
3873 | 53 | |||
3874 | 54 | @property | ||
3875 | 55 | def ca_key(self): | ||
3876 | 56 | return path_join(self.ca_dir, 'private', 'cacert.key') | ||
3877 | 57 | |||
3878 | 58 | @property | ||
3879 | 59 | def ca_cert(self): | ||
3880 | 60 | return path_join(self.ca_dir, 'cacert.pem') | ||
3881 | 61 | |||
3882 | 62 | @property | ||
3883 | 63 | def ca_conf(self): | ||
3884 | 64 | return path_join(self.ca_dir, 'ca.cnf') | ||
3885 | 65 | |||
3886 | 66 | @property | ||
3887 | 67 | def signing_conf(self): | ||
3888 | 68 | return path_join(self.ca_dir, 'signing.cnf') | ||
3889 | 69 | |||
3890 | 70 | def _init_ca_dir(self, ca_dir): | ||
3891 | 71 | os.mkdir(ca_dir) | ||
3892 | 72 | for i in ['certs', 'crl', 'newcerts', 'private']: | ||
3893 | 73 | sd = path_join(ca_dir, i) | ||
3894 | 74 | if not exists(sd): | ||
3895 | 75 | os.mkdir(sd) | ||
3896 | 76 | |||
3897 | 77 | if not exists(path_join(ca_dir, 'serial')): | ||
3898 | 78 | with open(path_join(ca_dir, 'serial'), 'wb') as fh: | ||
3899 | 79 | fh.write('02\n') | ||
3900 | 80 | |||
3901 | 81 | if not exists(path_join(ca_dir, 'index.txt')): | ||
3902 | 82 | with open(path_join(ca_dir, 'index.txt'), 'wb') as fh: | ||
3903 | 83 | fh.write('') | ||
3904 | 84 | |||
3905 | 85 | def _init_ca(self): | ||
3906 | 86 | """Generate the root ca's cert and key. | ||
3907 | 87 | """ | ||
3908 | 88 | if not exists(path_join(self.ca_dir, 'ca.cnf')): | ||
3909 | 89 | with open(path_join(self.ca_dir, 'ca.cnf'), 'wb') as fh: | ||
3910 | 90 | fh.write( | ||
3911 | 91 | CA_CONF_TEMPLATE % (self.get_conf_variables())) | ||
3912 | 92 | |||
3913 | 93 | if not exists(path_join(self.ca_dir, 'signing.cnf')): | ||
3914 | 94 | with open(path_join(self.ca_dir, 'signing.cnf'), 'wb') as fh: | ||
3915 | 95 | fh.write( | ||
3916 | 96 | SIGNING_CONF_TEMPLATE % (self.get_conf_variables())) | ||
3917 | 97 | |||
3918 | 98 | if exists(self.ca_cert) or exists(self.ca_key): | ||
3919 | 99 | raise RuntimeError("Initialized called when CA already exists") | ||
3920 | 100 | cmd = ['openssl', 'req', '-config', self.ca_conf, | ||
3921 | 101 | '-x509', '-nodes', '-newkey', 'rsa', | ||
3922 | 102 | '-days', self.default_ca_expiry, | ||
3923 | 103 | '-keyout', self.ca_key, '-out', self.ca_cert, | ||
3924 | 104 | '-outform', 'PEM'] | ||
3925 | 105 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) | ||
3926 | 106 | log.debug("CA Init:\n %s", output) | ||
3927 | 107 | |||
3928 | 108 | def get_conf_variables(self): | ||
3929 | 109 | return dict( | ||
3930 | 110 | org_name="juju", | ||
3931 | 111 | org_unit_name="%s service" % self.name, | ||
3932 | 112 | common_name=self.name, | ||
3933 | 113 | ca_dir=self.ca_dir) | ||
3934 | 114 | |||
3935 | 115 | def get_or_create_cert(self, common_name): | ||
3936 | 116 | if common_name in self: | ||
3937 | 117 | return self.get_certificate(common_name) | ||
3938 | 118 | return self.create_certificate(common_name) | ||
3939 | 119 | |||
3940 | 120 | def create_certificate(self, common_name): | ||
3941 | 121 | if common_name in self: | ||
3942 | 122 | return self.get_certificate(common_name) | ||
3943 | 123 | key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) | ||
3944 | 124 | crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) | ||
3945 | 125 | csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name) | ||
3946 | 126 | self._create_certificate(common_name, key_p, csr_p, crt_p) | ||
3947 | 127 | return self.get_certificate(common_name) | ||
3948 | 128 | |||
3949 | 129 | def get_certificate(self, common_name): | ||
3950 | 130 | if not common_name in self: | ||
3951 | 131 | raise ValueError("No certificate for %s" % common_name) | ||
3952 | 132 | key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) | ||
3953 | 133 | crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) | ||
3954 | 134 | with open(crt_p) as fh: | ||
3955 | 135 | crt = fh.read() | ||
3956 | 136 | with open(key_p) as fh: | ||
3957 | 137 | key = fh.read() | ||
3958 | 138 | return crt, key | ||
3959 | 139 | |||
3960 | 140 | def __contains__(self, common_name): | ||
3961 | 141 | crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) | ||
3962 | 142 | return exists(crt_p) | ||
3963 | 143 | |||
3964 | 144 | def _create_certificate(self, common_name, key_p, csr_p, crt_p): | ||
3965 | 145 | template_vars = self.get_conf_variables() | ||
3966 | 146 | template_vars['common_name'] = common_name | ||
3967 | 147 | subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % ( | ||
3968 | 148 | template_vars) | ||
3969 | 149 | |||
3970 | 150 | log.debug("CA Create Cert %s", common_name) | ||
3971 | 151 | cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048', | ||
3972 | 152 | '-nodes', '-days', self.default_expiry, | ||
3973 | 153 | '-keyout', key_p, '-out', csr_p, '-subj', subj] | ||
3974 | 154 | subprocess.check_call(cmd) | ||
3975 | 155 | cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p] | ||
3976 | 156 | subprocess.check_call(cmd) | ||
3977 | 157 | |||
3978 | 158 | log.debug("CA Sign Cert %s", common_name) | ||
3979 | 159 | if self.cert_type == MYSQL_CERT: | ||
3980 | 160 | cmd = ['openssl', 'x509', '-req', | ||
3981 | 161 | '-in', csr_p, '-days', self.default_expiry, | ||
3982 | 162 | '-CA', self.ca_cert, '-CAkey', self.ca_key, | ||
3983 | 163 | '-set_serial', '01', '-out', crt_p] | ||
3984 | 164 | else: | ||
3985 | 165 | cmd = ['openssl', 'ca', '-config', self.signing_conf, | ||
3986 | 166 | '-extensions', 'req_extensions', | ||
3987 | 167 | '-days', self.default_expiry, '-notext', | ||
3988 | 168 | '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch'] | ||
3989 | 169 | log.debug("running %s", " ".join(cmd)) | ||
3990 | 170 | subprocess.check_call(cmd) | ||
3991 | 171 | |||
3992 | 172 | def get_ca_bundle(self): | ||
3993 | 173 | with open(self.ca_cert) as fh: | ||
3994 | 174 | return fh.read() | ||
3995 | 175 | |||
3996 | 176 | |||
3997 | 177 | CA_CONF_TEMPLATE = """ | ||
3998 | 178 | [ ca ] | ||
3999 | 179 | default_ca = CA_default | ||
4000 | 180 | |||
4001 | 181 | [ CA_default ] | ||
4002 | 182 | dir = %(ca_dir)s | ||
4003 | 183 | policy = policy_match | ||
4004 | 184 | database = $dir/index.txt | ||
4005 | 185 | serial = $dir/serial | ||
4006 | 186 | certs = $dir/certs | ||
4007 | 187 | crl_dir = $dir/crl | ||
4008 | 188 | new_certs_dir = $dir/newcerts | ||
4009 | 189 | certificate = $dir/cacert.pem | ||
4010 | 190 | private_key = $dir/private/cacert.key | ||
4011 | 191 | RANDFILE = $dir/private/.rand | ||
4012 | 192 | default_md = default | ||
4013 | 193 | |||
4014 | 194 | [ req ] | ||
4015 | 195 | default_bits = 1024 | ||
4016 | 196 | default_md = sha1 | ||
4017 | 197 | |||
4018 | 198 | prompt = no | ||
4019 | 199 | distinguished_name = ca_distinguished_name | ||
4020 | 200 | |||
4021 | 201 | x509_extensions = ca_extensions | ||
4022 | 202 | |||
4023 | 203 | [ ca_distinguished_name ] | ||
4024 | 204 | organizationName = %(org_name)s | ||
4025 | 205 | organizationalUnitName = %(org_unit_name)s Certificate Authority | ||
4026 | 206 | |||
4027 | 207 | |||
4028 | 208 | [ policy_match ] | ||
4029 | 209 | countryName = optional | ||
4030 | 210 | stateOrProvinceName = optional | ||
4031 | 211 | organizationName = match | ||
4032 | 212 | organizationalUnitName = optional | ||
4033 | 213 | commonName = supplied | ||
4034 | 214 | |||
4035 | 215 | [ ca_extensions ] | ||
4036 | 216 | basicConstraints = critical,CA:true | ||
4037 | 217 | subjectKeyIdentifier = hash | ||
4038 | 218 | authorityKeyIdentifier = keyid:always, issuer | ||
4039 | 219 | keyUsage = cRLSign, keyCertSign | ||
4040 | 220 | """ | ||
4041 | 221 | |||
4042 | 222 | |||
4043 | 223 | SIGNING_CONF_TEMPLATE = """ | ||
4044 | 224 | [ ca ] | ||
4045 | 225 | default_ca = CA_default | ||
4046 | 226 | |||
4047 | 227 | [ CA_default ] | ||
4048 | 228 | dir = %(ca_dir)s | ||
4049 | 229 | policy = policy_match | ||
4050 | 230 | database = $dir/index.txt | ||
4051 | 231 | serial = $dir/serial | ||
4052 | 232 | certs = $dir/certs | ||
4053 | 233 | crl_dir = $dir/crl | ||
4054 | 234 | new_certs_dir = $dir/newcerts | ||
4055 | 235 | certificate = $dir/cacert.pem | ||
4056 | 236 | private_key = $dir/private/cacert.key | ||
4057 | 237 | RANDFILE = $dir/private/.rand | ||
4058 | 238 | default_md = default | ||
4059 | 239 | |||
4060 | 240 | [ req ] | ||
4061 | 241 | default_bits = 1024 | ||
4062 | 242 | default_md = sha1 | ||
4063 | 243 | |||
4064 | 244 | prompt = no | ||
4065 | 245 | distinguished_name = req_distinguished_name | ||
4066 | 246 | |||
4067 | 247 | x509_extensions = req_extensions | ||
4068 | 248 | |||
4069 | 249 | [ req_distinguished_name ] | ||
4070 | 250 | organizationName = %(org_name)s | ||
4071 | 251 | organizationalUnitName = %(org_unit_name)s machine resources | ||
4072 | 252 | commonName = %(common_name)s | ||
4073 | 253 | |||
4074 | 254 | [ policy_match ] | ||
4075 | 255 | countryName = optional | ||
4076 | 256 | stateOrProvinceName = optional | ||
4077 | 257 | organizationName = match | ||
4078 | 258 | organizationalUnitName = optional | ||
4079 | 259 | commonName = supplied | ||
4080 | 260 | |||
4081 | 261 | [ req_extensions ] | ||
4082 | 262 | basicConstraints = CA:false | ||
4083 | 263 | subjectKeyIdentifier = hash | ||
4084 | 264 | authorityKeyIdentifier = keyid:always, issuer | ||
4085 | 265 | keyUsage = digitalSignature, keyEncipherment, keyAgreement | ||
4086 | 266 | extendedKeyUsage = serverAuth, clientAuth | ||
4087 | 267 | """ | ||
4088 | 268 | 0 | ||
4089 | === removed directory 'hooks/charmhelpers/contrib/storage' | |||
4090 | === removed file 'hooks/charmhelpers/contrib/storage/__init__.py' | |||
4091 | === removed directory 'hooks/charmhelpers/contrib/storage/linux' | |||
4092 | === removed file 'hooks/charmhelpers/contrib/storage/linux/__init__.py' | |||
4093 | === removed file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
4094 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-05-09 20:11:59 +0000 | |||
4095 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000 | |||
4096 | @@ -1,387 +0,0 @@ | |||
4097 | 1 | # | ||
4098 | 2 | # Copyright 2012 Canonical Ltd. | ||
4099 | 3 | # | ||
4100 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
4101 | 5 | # | ||
4102 | 6 | # Authors: | ||
4103 | 7 | # James Page <james.page@ubuntu.com> | ||
4104 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
4105 | 9 | # | ||
4106 | 10 | |||
4107 | 11 | import os | ||
4108 | 12 | import shutil | ||
4109 | 13 | import json | ||
4110 | 14 | import time | ||
4111 | 15 | |||
4112 | 16 | from subprocess import ( | ||
4113 | 17 | check_call, | ||
4114 | 18 | check_output, | ||
4115 | 19 | CalledProcessError | ||
4116 | 20 | ) | ||
4117 | 21 | |||
4118 | 22 | from charmhelpers.core.hookenv import ( | ||
4119 | 23 | relation_get, | ||
4120 | 24 | relation_ids, | ||
4121 | 25 | related_units, | ||
4122 | 26 | log, | ||
4123 | 27 | INFO, | ||
4124 | 28 | WARNING, | ||
4125 | 29 | ERROR | ||
4126 | 30 | ) | ||
4127 | 31 | |||
4128 | 32 | from charmhelpers.core.host import ( | ||
4129 | 33 | mount, | ||
4130 | 34 | mounts, | ||
4131 | 35 | service_start, | ||
4132 | 36 | service_stop, | ||
4133 | 37 | service_running, | ||
4134 | 38 | umount, | ||
4135 | 39 | ) | ||
4136 | 40 | |||
4137 | 41 | from charmhelpers.fetch import ( | ||
4138 | 42 | apt_install, | ||
4139 | 43 | ) | ||
4140 | 44 | |||
4141 | 45 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' | ||
4142 | 46 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | ||
4143 | 47 | |||
4144 | 48 | CEPH_CONF = """[global] | ||
4145 | 49 | auth supported = {auth} | ||
4146 | 50 | keyring = {keyring} | ||
4147 | 51 | mon host = {mon_hosts} | ||
4148 | 52 | log to syslog = {use_syslog} | ||
4149 | 53 | err to syslog = {use_syslog} | ||
4150 | 54 | clog to syslog = {use_syslog} | ||
4151 | 55 | """ | ||
4152 | 56 | |||
4153 | 57 | |||
4154 | 58 | def install(): | ||
4155 | 59 | ''' Basic Ceph client installation ''' | ||
4156 | 60 | ceph_dir = "/etc/ceph" | ||
4157 | 61 | if not os.path.exists(ceph_dir): | ||
4158 | 62 | os.mkdir(ceph_dir) | ||
4159 | 63 | apt_install('ceph-common', fatal=True) | ||
4160 | 64 | |||
4161 | 65 | |||
4162 | 66 | def rbd_exists(service, pool, rbd_img): | ||
4163 | 67 | ''' Check to see if a RADOS block device exists ''' | ||
4164 | 68 | try: | ||
4165 | 69 | out = check_output(['rbd', 'list', '--id', service, | ||
4166 | 70 | '--pool', pool]) | ||
4167 | 71 | except CalledProcessError: | ||
4168 | 72 | return False | ||
4169 | 73 | else: | ||
4170 | 74 | return rbd_img in out | ||
4171 | 75 | |||
4172 | 76 | |||
4173 | 77 | def create_rbd_image(service, pool, image, sizemb): | ||
4174 | 78 | ''' Create a new RADOS block device ''' | ||
4175 | 79 | cmd = [ | ||
4176 | 80 | 'rbd', | ||
4177 | 81 | 'create', | ||
4178 | 82 | image, | ||
4179 | 83 | '--size', | ||
4180 | 84 | str(sizemb), | ||
4181 | 85 | '--id', | ||
4182 | 86 | service, | ||
4183 | 87 | '--pool', | ||
4184 | 88 | pool | ||
4185 | 89 | ] | ||
4186 | 90 | check_call(cmd) | ||
4187 | 91 | |||
4188 | 92 | |||
4189 | 93 | def pool_exists(service, name): | ||
4190 | 94 | ''' Check to see if a RADOS pool already exists ''' | ||
4191 | 95 | try: | ||
4192 | 96 | out = check_output(['rados', '--id', service, 'lspools']) | ||
4193 | 97 | except CalledProcessError: | ||
4194 | 98 | return False | ||
4195 | 99 | else: | ||
4196 | 100 | return name in out | ||
4197 | 101 | |||
4198 | 102 | |||
4199 | 103 | def get_osds(service): | ||
4200 | 104 | ''' | ||
4201 | 105 | Return a list of all Ceph Object Storage Daemons | ||
4202 | 106 | currently in the cluster | ||
4203 | 107 | ''' | ||
4204 | 108 | version = ceph_version() | ||
4205 | 109 | if version and version >= '0.56': | ||
4206 | 110 | return json.loads(check_output(['ceph', '--id', service, | ||
4207 | 111 | 'osd', 'ls', '--format=json'])) | ||
4208 | 112 | else: | ||
4209 | 113 | return None | ||
4210 | 114 | |||
4211 | 115 | |||
4212 | 116 | def create_pool(service, name, replicas=2): | ||
4213 | 117 | ''' Create a new RADOS pool ''' | ||
4214 | 118 | if pool_exists(service, name): | ||
4215 | 119 | log("Ceph pool {} already exists, skipping creation".format(name), | ||
4216 | 120 | level=WARNING) | ||
4217 | 121 | return | ||
4218 | 122 | # Calculate the number of placement groups based | ||
4219 | 123 | # on upstream recommended best practices. | ||
4220 | 124 | osds = get_osds(service) | ||
4221 | 125 | if osds: | ||
4222 | 126 | pgnum = (len(osds) * 100 / replicas) | ||
4223 | 127 | else: | ||
4224 | 128 | # NOTE(james-page): Default to 200 for older ceph versions | ||
4225 | 129 | # which don't support OSD query from cli | ||
4226 | 130 | pgnum = 200 | ||
4227 | 131 | cmd = [ | ||
4228 | 132 | 'ceph', '--id', service, | ||
4229 | 133 | 'osd', 'pool', 'create', | ||
4230 | 134 | name, str(pgnum) | ||
4231 | 135 | ] | ||
4232 | 136 | check_call(cmd) | ||
4233 | 137 | cmd = [ | ||
4234 | 138 | 'ceph', '--id', service, | ||
4235 | 139 | 'osd', 'pool', 'set', name, | ||
4236 | 140 | 'size', str(replicas) | ||
4237 | 141 | ] | ||
4238 | 142 | check_call(cmd) | ||
4239 | 143 | |||
4240 | 144 | |||
4241 | 145 | def delete_pool(service, name): | ||
4242 | 146 | ''' Delete a RADOS pool from ceph ''' | ||
4243 | 147 | cmd = [ | ||
4244 | 148 | 'ceph', '--id', service, | ||
4245 | 149 | 'osd', 'pool', 'delete', | ||
4246 | 150 | name, '--yes-i-really-really-mean-it' | ||
4247 | 151 | ] | ||
4248 | 152 | check_call(cmd) | ||
4249 | 153 | |||
4250 | 154 | |||
4251 | 155 | def _keyfile_path(service): | ||
4252 | 156 | return KEYFILE.format(service) | ||
4253 | 157 | |||
4254 | 158 | |||
4255 | 159 | def _keyring_path(service): | ||
4256 | 160 | return KEYRING.format(service) | ||
4257 | 161 | |||
4258 | 162 | |||
4259 | 163 | def create_keyring(service, key): | ||
4260 | 164 | ''' Create a new Ceph keyring containing key''' | ||
4261 | 165 | keyring = _keyring_path(service) | ||
4262 | 166 | if os.path.exists(keyring): | ||
4263 | 167 | log('ceph: Keyring exists at %s.' % keyring, level=WARNING) | ||
4264 | 168 | return | ||
4265 | 169 | cmd = [ | ||
4266 | 170 | 'ceph-authtool', | ||
4267 | 171 | keyring, | ||
4268 | 172 | '--create-keyring', | ||
4269 | 173 | '--name=client.{}'.format(service), | ||
4270 | 174 | '--add-key={}'.format(key) | ||
4271 | 175 | ] | ||
4272 | 176 | check_call(cmd) | ||
4273 | 177 | log('ceph: Created new ring at %s.' % keyring, level=INFO) | ||
4274 | 178 | |||
4275 | 179 | |||
4276 | 180 | def create_key_file(service, key): | ||
4277 | 181 | ''' Create a file containing key ''' | ||
4278 | 182 | keyfile = _keyfile_path(service) | ||
4279 | 183 | if os.path.exists(keyfile): | ||
4280 | 184 | log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) | ||
4281 | 185 | return | ||
4282 | 186 | with open(keyfile, 'w') as fd: | ||
4283 | 187 | fd.write(key) | ||
4284 | 188 | log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) | ||
4285 | 189 | |||
4286 | 190 | |||
4287 | 191 | def get_ceph_nodes(): | ||
4288 | 192 | ''' Query named relation 'ceph' to detemine current nodes ''' | ||
4289 | 193 | hosts = [] | ||
4290 | 194 | for r_id in relation_ids('ceph'): | ||
4291 | 195 | for unit in related_units(r_id): | ||
4292 | 196 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | ||
4293 | 197 | return hosts | ||
4294 | 198 | |||
4295 | 199 | |||
4296 | 200 | def configure(service, key, auth, use_syslog): | ||
4297 | 201 | ''' Perform basic configuration of Ceph ''' | ||
4298 | 202 | create_keyring(service, key) | ||
4299 | 203 | create_key_file(service, key) | ||
4300 | 204 | hosts = get_ceph_nodes() | ||
4301 | 205 | with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: | ||
4302 | 206 | ceph_conf.write(CEPH_CONF.format(auth=auth, | ||
4303 | 207 | keyring=_keyring_path(service), | ||
4304 | 208 | mon_hosts=",".join(map(str, hosts)), | ||
4305 | 209 | use_syslog=use_syslog)) | ||
4306 | 210 | modprobe('rbd') | ||
4307 | 211 | |||
4308 | 212 | |||
4309 | 213 | def image_mapped(name): | ||
4310 | 214 | ''' Determine whether a RADOS block device is mapped locally ''' | ||
4311 | 215 | try: | ||
4312 | 216 | out = check_output(['rbd', 'showmapped']) | ||
4313 | 217 | except CalledProcessError: | ||
4314 | 218 | return False | ||
4315 | 219 | else: | ||
4316 | 220 | return name in out | ||
4317 | 221 | |||
4318 | 222 | |||
4319 | 223 | def map_block_storage(service, pool, image): | ||
4320 | 224 | ''' Map a RADOS block device for local use ''' | ||
4321 | 225 | cmd = [ | ||
4322 | 226 | 'rbd', | ||
4323 | 227 | 'map', | ||
4324 | 228 | '{}/{}'.format(pool, image), | ||
4325 | 229 | '--user', | ||
4326 | 230 | service, | ||
4327 | 231 | '--secret', | ||
4328 | 232 | _keyfile_path(service), | ||
4329 | 233 | ] | ||
4330 | 234 | check_call(cmd) | ||
4331 | 235 | |||
4332 | 236 | |||
4333 | 237 | def filesystem_mounted(fs): | ||
4334 | 238 | ''' Determine whether a filesytems is already mounted ''' | ||
4335 | 239 | return fs in [f for f, m in mounts()] | ||
4336 | 240 | |||
4337 | 241 | |||
4338 | 242 | def make_filesystem(blk_device, fstype='ext4', timeout=10): | ||
4339 | 243 | ''' Make a new filesystem on the specified block device ''' | ||
4340 | 244 | count = 0 | ||
4341 | 245 | e_noent = os.errno.ENOENT | ||
4342 | 246 | while not os.path.exists(blk_device): | ||
4343 | 247 | if count >= timeout: | ||
4344 | 248 | log('ceph: gave up waiting on block device %s' % blk_device, | ||
4345 | 249 | level=ERROR) | ||
4346 | 250 | raise IOError(e_noent, os.strerror(e_noent), blk_device) | ||
4347 | 251 | log('ceph: waiting for block device %s to appear' % blk_device, | ||
4348 | 252 | level=INFO) | ||
4349 | 253 | count += 1 | ||
4350 | 254 | time.sleep(1) | ||
4351 | 255 | else: | ||
4352 | 256 | log('ceph: Formatting block device %s as filesystem %s.' % | ||
4353 | 257 | (blk_device, fstype), level=INFO) | ||
4354 | 258 | check_call(['mkfs', '-t', fstype, blk_device]) | ||
4355 | 259 | |||
4356 | 260 | |||
4357 | 261 | def place_data_on_block_device(blk_device, data_src_dst): | ||
4358 | 262 | ''' Migrate data in data_src_dst to blk_device and then remount ''' | ||
4359 | 263 | # mount block device into /mnt | ||
4360 | 264 | mount(blk_device, '/mnt') | ||
4361 | 265 | # copy data to /mnt | ||
4362 | 266 | copy_files(data_src_dst, '/mnt') | ||
4363 | 267 | # umount block device | ||
4364 | 268 | umount('/mnt') | ||
4365 | 269 | # Grab user/group ID's from original source | ||
4366 | 270 | _dir = os.stat(data_src_dst) | ||
4367 | 271 | uid = _dir.st_uid | ||
4368 | 272 | gid = _dir.st_gid | ||
4369 | 273 | # re-mount where the data should originally be | ||
4370 | 274 | # TODO: persist is currently a NO-OP in core.host | ||
4371 | 275 | mount(blk_device, data_src_dst, persist=True) | ||
4372 | 276 | # ensure original ownership of new mount. | ||
4373 | 277 | os.chown(data_src_dst, uid, gid) | ||
4374 | 278 | |||
4375 | 279 | |||
4376 | 280 | # TODO: re-use | ||
4377 | 281 | def modprobe(module): | ||
4378 | 282 | ''' Load a kernel module and configure for auto-load on reboot ''' | ||
4379 | 283 | log('ceph: Loading kernel module', level=INFO) | ||
4380 | 284 | cmd = ['modprobe', module] | ||
4381 | 285 | check_call(cmd) | ||
4382 | 286 | with open('/etc/modules', 'r+') as modules: | ||
4383 | 287 | if module not in modules.read(): | ||
4384 | 288 | modules.write(module) | ||
4385 | 289 | |||
4386 | 290 | |||
4387 | 291 | def copy_files(src, dst, symlinks=False, ignore=None): | ||
4388 | 292 | ''' Copy files from src to dst ''' | ||
4389 | 293 | for item in os.listdir(src): | ||
4390 | 294 | s = os.path.join(src, item) | ||
4391 | 295 | d = os.path.join(dst, item) | ||
4392 | 296 | if os.path.isdir(s): | ||
4393 | 297 | shutil.copytree(s, d, symlinks, ignore) | ||
4394 | 298 | else: | ||
4395 | 299 | shutil.copy2(s, d) | ||
4396 | 300 | |||
4397 | 301 | |||
4398 | 302 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | ||
4399 | 303 | blk_device, fstype, system_services=[]): | ||
4400 | 304 | """ | ||
4401 | 305 | NOTE: This function must only be called from a single service unit for | ||
4402 | 306 | the same rbd_img otherwise data loss will occur. | ||
4403 | 307 | |||
4404 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | ||
4405 | 309 | and the device is formatted and mounted at the given mount_point. | ||
4406 | 310 | |||
4407 | 311 | If formatting a device for the first time, data existing at mount_point | ||
4408 | 312 | will be migrated to the RBD device before being re-mounted. | ||
4409 | 313 | |||
4410 | 314 | All services listed in system_services will be stopped prior to data | ||
4411 | 315 | migration and restarted when complete. | ||
4412 | 316 | """ | ||
4413 | 317 | # Ensure pool, RBD image, RBD mappings are in place. | ||
4414 | 318 | if not pool_exists(service, pool): | ||
4415 | 319 | log('ceph: Creating new pool {}.'.format(pool)) | ||
4416 | 320 | create_pool(service, pool) | ||
4417 | 321 | |||
4418 | 322 | if not rbd_exists(service, pool, rbd_img): | ||
4419 | 323 | log('ceph: Creating RBD image ({}).'.format(rbd_img)) | ||
4420 | 324 | create_rbd_image(service, pool, rbd_img, sizemb) | ||
4421 | 325 | |||
4422 | 326 | if not image_mapped(rbd_img): | ||
4423 | 327 | log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) | ||
4424 | 328 | map_block_storage(service, pool, rbd_img) | ||
4425 | 329 | |||
4426 | 330 | # make file system | ||
4427 | 331 | # TODO: What happens if for whatever reason this is run again and | ||
4428 | 332 | # the data is already in the rbd device and/or is mounted?? | ||
4429 | 333 | # When it is mounted already, it will fail to make the fs | ||
4430 | 334 | # XXX: This is really sketchy! Need to at least add an fstab entry | ||
4431 | 335 | # otherwise this hook will blow away existing data if its executed | ||
4432 | 336 | # after a reboot. | ||
4433 | 337 | if not filesystem_mounted(mount_point): | ||
4434 | 338 | make_filesystem(blk_device, fstype) | ||
4435 | 339 | |||
4436 | 340 | for svc in system_services: | ||
4437 | 341 | if service_running(svc): | ||
4438 | 342 | log('ceph: Stopping services {} prior to migrating data.' | ||
4439 | 343 | .format(svc)) | ||
4440 | 344 | service_stop(svc) | ||
4441 | 345 | |||
4442 | 346 | place_data_on_block_device(blk_device, mount_point) | ||
4443 | 347 | |||
4444 | 348 | for svc in system_services: | ||
4445 | 349 | log('ceph: Starting service {} after migrating data.' | ||
4446 | 350 | .format(svc)) | ||
4447 | 351 | service_start(svc) | ||
4448 | 352 | |||
4449 | 353 | |||
4450 | 354 | def ensure_ceph_keyring(service, user=None, group=None): | ||
4451 | 355 | ''' | ||
4452 | 356 | Ensures a ceph keyring is created for a named service | ||
4453 | 357 | and optionally ensures user and group ownership. | ||
4454 | 358 | |||
4455 | 359 | Returns False if no ceph key is available in relation state. | ||
4456 | 360 | ''' | ||
4457 | 361 | key = None | ||
4458 | 362 | for rid in relation_ids('ceph'): | ||
4459 | 363 | for unit in related_units(rid): | ||
4460 | 364 | key = relation_get('key', rid=rid, unit=unit) | ||
4461 | 365 | if key: | ||
4462 | 366 | break | ||
4463 | 367 | if not key: | ||
4464 | 368 | return False | ||
4465 | 369 | create_keyring(service=service, key=key) | ||
4466 | 370 | keyring = _keyring_path(service) | ||
4467 | 371 | if user and group: | ||
4468 | 372 | check_call(['chown', '%s.%s' % (user, group), keyring]) | ||
4469 | 373 | return True | ||
4470 | 374 | |||
4471 | 375 | |||
4472 | 376 | def ceph_version(): | ||
4473 | 377 | ''' Retrieve the local version of ceph ''' | ||
4474 | 378 | if os.path.exists('/usr/bin/ceph'): | ||
4475 | 379 | cmd = ['ceph', '-v'] | ||
4476 | 380 | output = check_output(cmd) | ||
4477 | 381 | output = output.split() | ||
4478 | 382 | if len(output) > 3: | ||
4479 | 383 | return output[2] | ||
4480 | 384 | else: | ||
4481 | 385 | return None | ||
4482 | 386 | else: | ||
4483 | 387 | return None | ||
4484 | 388 | 0 | ||
4485 | === removed file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
4486 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-11-26 17:12:54 +0000 | |||
4487 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000 | |||
4488 | @@ -1,62 +0,0 @@ | |||
4489 | 1 | |||
4490 | 2 | import os | ||
4491 | 3 | import re | ||
4492 | 4 | |||
4493 | 5 | from subprocess import ( | ||
4494 | 6 | check_call, | ||
4495 | 7 | check_output, | ||
4496 | 8 | ) | ||
4497 | 9 | |||
4498 | 10 | |||
4499 | 11 | ################################################## | ||
4500 | 12 | # loopback device helpers. | ||
4501 | 13 | ################################################## | ||
4502 | 14 | def loopback_devices(): | ||
4503 | 15 | ''' | ||
4504 | 16 | Parse through 'losetup -a' output to determine currently mapped | ||
4505 | 17 | loopback devices. Output is expected to look like: | ||
4506 | 18 | |||
4507 | 19 | /dev/loop0: [0807]:961814 (/tmp/my.img) | ||
4508 | 20 | |||
4509 | 21 | :returns: dict: a dict mapping {loopback_dev: backing_file} | ||
4510 | 22 | ''' | ||
4511 | 23 | loopbacks = {} | ||
4512 | 24 | cmd = ['losetup', '-a'] | ||
4513 | 25 | devs = [d.strip().split(' ') for d in | ||
4514 | 26 | check_output(cmd).splitlines() if d != ''] | ||
4515 | 27 | for dev, _, f in devs: | ||
4516 | 28 | loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] | ||
4517 | 29 | return loopbacks | ||
4518 | 30 | |||
4519 | 31 | |||
4520 | 32 | def create_loopback(file_path): | ||
4521 | 33 | ''' | ||
4522 | 34 | Create a loopback device for a given backing file. | ||
4523 | 35 | |||
4524 | 36 | :returns: str: Full path to new loopback device (eg, /dev/loop0) | ||
4525 | 37 | ''' | ||
4526 | 38 | file_path = os.path.abspath(file_path) | ||
4527 | 39 | check_call(['losetup', '--find', file_path]) | ||
4528 | 40 | for d, f in loopback_devices().iteritems(): | ||
4529 | 41 | if f == file_path: | ||
4530 | 42 | return d | ||
4531 | 43 | |||
4532 | 44 | |||
4533 | 45 | def ensure_loopback_device(path, size): | ||
4534 | 46 | ''' | ||
4535 | 47 | Ensure a loopback device exists for a given backing file path and size. | ||
4536 | 48 | If it a loopback device is not mapped to file, a new one will be created. | ||
4537 | 49 | |||
4538 | 50 | TODO: Confirm size of found loopback device. | ||
4539 | 51 | |||
4540 | 52 | :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) | ||
4541 | 53 | ''' | ||
4542 | 54 | for d, f in loopback_devices().iteritems(): | ||
4543 | 55 | if f == path: | ||
4544 | 56 | return d | ||
4545 | 57 | |||
4546 | 58 | if not os.path.exists(path): | ||
4547 | 59 | cmd = ['truncate', '--size', size, path] | ||
4548 | 60 | check_call(cmd) | ||
4549 | 61 | |||
4550 | 62 | return create_loopback(path) | ||
4551 | 63 | 0 | ||
4552 | === removed file 'hooks/charmhelpers/contrib/storage/linux/lvm.py' | |||
4553 | --- hooks/charmhelpers/contrib/storage/linux/lvm.py 2013-11-26 17:12:54 +0000 | |||
4554 | +++ hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000 | |||
4555 | @@ -1,88 +0,0 @@ | |||
4556 | 1 | from subprocess import ( | ||
4557 | 2 | CalledProcessError, | ||
4558 | 3 | check_call, | ||
4559 | 4 | check_output, | ||
4560 | 5 | Popen, | ||
4561 | 6 | PIPE, | ||
4562 | 7 | ) | ||
4563 | 8 | |||
4564 | 9 | |||
4565 | 10 | ################################################## | ||
4566 | 11 | # LVM helpers. | ||
4567 | 12 | ################################################## | ||
4568 | 13 | def deactivate_lvm_volume_group(block_device): | ||
4569 | 14 | ''' | ||
4570 | 15 | Deactivate any volume gruop associated with an LVM physical volume. | ||
4571 | 16 | |||
4572 | 17 | :param block_device: str: Full path to LVM physical volume | ||
4573 | 18 | ''' | ||
4574 | 19 | vg = list_lvm_volume_group(block_device) | ||
4575 | 20 | if vg: | ||
4576 | 21 | cmd = ['vgchange', '-an', vg] | ||
4577 | 22 | check_call(cmd) | ||
4578 | 23 | |||
4579 | 24 | |||
4580 | 25 | def is_lvm_physical_volume(block_device): | ||
4581 | 26 | ''' | ||
4582 | 27 | Determine whether a block device is initialized as an LVM PV. | ||
4583 | 28 | |||
4584 | 29 | :param block_device: str: Full path of block device to inspect. | ||
4585 | 30 | |||
4586 | 31 | :returns: boolean: True if block device is a PV, False if not. | ||
4587 | 32 | ''' | ||
4588 | 33 | try: | ||
4589 | 34 | check_output(['pvdisplay', block_device]) | ||
4590 | 35 | return True | ||
4591 | 36 | except CalledProcessError: | ||
4592 | 37 | return False | ||
4593 | 38 | |||
4594 | 39 | |||
4595 | 40 | def remove_lvm_physical_volume(block_device): | ||
4596 | 41 | ''' | ||
4597 | 42 | Remove LVM PV signatures from a given block device. | ||
4598 | 43 | |||
4599 | 44 | :param block_device: str: Full path of block device to scrub. | ||
4600 | 45 | ''' | ||
4601 | 46 | p = Popen(['pvremove', '-ff', block_device], | ||
4602 | 47 | stdin=PIPE) | ||
4603 | 48 | p.communicate(input='y\n') | ||
4604 | 49 | |||
4605 | 50 | |||
4606 | 51 | def list_lvm_volume_group(block_device): | ||
4607 | 52 | ''' | ||
4608 | 53 | List LVM volume group associated with a given block device. | ||
4609 | 54 | |||
4610 | 55 | Assumes block device is a valid LVM PV. | ||
4611 | 56 | |||
4612 | 57 | :param block_device: str: Full path of block device to inspect. | ||
4613 | 58 | |||
4614 | 59 | :returns: str: Name of volume group associated with block device or None | ||
4615 | 60 | ''' | ||
4616 | 61 | vg = None | ||
4617 | 62 | pvd = check_output(['pvdisplay', block_device]).splitlines() | ||
4618 | 63 | for l in pvd: | ||
4619 | 64 | if l.strip().startswith('VG Name'): | ||
4620 | 65 | vg = ' '.join(l.split()).split(' ').pop() | ||
4621 | 66 | return vg | ||
4622 | 67 | |||
4623 | 68 | |||
4624 | 69 | def create_lvm_physical_volume(block_device): | ||
4625 | 70 | ''' | ||
4626 | 71 | Initialize a block device as an LVM physical volume. | ||
4627 | 72 | |||
4628 | 73 | :param block_device: str: Full path of block device to initialize. | ||
4629 | 74 | |||
4630 | 75 | ''' | ||
4631 | 76 | check_call(['pvcreate', block_device]) | ||
4632 | 77 | |||
4633 | 78 | |||
4634 | 79 | def create_lvm_volume_group(volume_group, block_device): | ||
4635 | 80 | ''' | ||
4636 | 81 | Create an LVM volume group backed by a given block device. | ||
4637 | 82 | |||
4638 | 83 | Assumes block device has already been initialized as an LVM PV. | ||
4639 | 84 | |||
4640 | 85 | :param volume_group: str: Name of volume group to create. | ||
4641 | 86 | :block_device: str: Full path of PV-initialized block device. | ||
4642 | 87 | ''' | ||
4643 | 88 | check_call(['vgcreate', volume_group, block_device]) | ||
4644 | 89 | 0 | ||
4645 | === removed file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
4646 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-09 20:11:59 +0000 | |||
4647 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000 | |||
4648 | @@ -1,35 +0,0 @@ | |||
4649 | 1 | from os import stat | ||
4650 | 2 | from stat import S_ISBLK | ||
4651 | 3 | |||
4652 | 4 | from subprocess import ( | ||
4653 | 5 | check_call, | ||
4654 | 6 | check_output, | ||
4655 | 7 | call | ||
4656 | 8 | ) | ||
4657 | 9 | |||
4658 | 10 | |||
4659 | 11 | def is_block_device(path): | ||
4660 | 12 | ''' | ||
4661 | 13 | Confirm device at path is a valid block device node. | ||
4662 | 14 | |||
4663 | 15 | :returns: boolean: True if path is a block device, False if not. | ||
4664 | 16 | ''' | ||
4665 | 17 | return S_ISBLK(stat(path).st_mode) | ||
4666 | 18 | |||
4667 | 19 | |||
4668 | 20 | def zap_disk(block_device): | ||
4669 | 21 | ''' | ||
4670 | 22 | Clear a block device of partition table. Relies on sgdisk, which is | ||
4671 | 23 | installed as pat of the 'gdisk' package in Ubuntu. | ||
4672 | 24 | |||
4673 | 25 | :param block_device: str: Full path of block device to clean. | ||
4674 | 26 | ''' | ||
4675 | 27 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up | ||
4676 | 28 | call(['sgdisk', '--zap-all', '--mbrtogpt', | ||
4677 | 29 | '--clear', block_device]) | ||
4678 | 30 | dev_end = check_output(['blockdev', '--getsz', block_device]) | ||
4679 | 31 | gpt_end = int(dev_end.split()[0]) - 100 | ||
4680 | 32 | check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), | ||
4681 | 33 | 'bs=1M', 'count=1']) | ||
4682 | 34 | check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), | ||
4683 | 35 | 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) | ||
4684 | 36 | 0 | ||
4685 | === removed directory 'hooks/charmhelpers/contrib/templating' | |||
4686 | === removed file 'hooks/charmhelpers/contrib/templating/__init__.py' | |||
4687 | === removed file 'hooks/charmhelpers/contrib/templating/contexts.py' | |||
4688 | --- hooks/charmhelpers/contrib/templating/contexts.py 2014-05-09 20:11:59 +0000 | |||
4689 | +++ hooks/charmhelpers/contrib/templating/contexts.py 1970-01-01 00:00:00 +0000 | |||
4690 | @@ -1,104 +0,0 @@ | |||
4691 | 1 | # Copyright 2013 Canonical Ltd. | ||
4692 | 2 | # | ||
4693 | 3 | # Authors: | ||
4694 | 4 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
4695 | 5 | """A helper to create a yaml cache of config with namespaced relation data.""" | ||
4696 | 6 | import os | ||
4697 | 7 | import yaml | ||
4698 | 8 | |||
4699 | 9 | import charmhelpers.core.hookenv | ||
4700 | 10 | |||
4701 | 11 | |||
4702 | 12 | charm_dir = os.environ.get('CHARM_DIR', '') | ||
4703 | 13 | |||
4704 | 14 | |||
4705 | 15 | def dict_keys_without_hyphens(a_dict): | ||
4706 | 16 | """Return the a new dict with underscores instead of hyphens in keys.""" | ||
4707 | 17 | return dict( | ||
4708 | 18 | (key.replace('-', '_'), val) for key, val in a_dict.items()) | ||
4709 | 19 | |||
4710 | 20 | |||
4711 | 21 | def update_relations(context, namespace_separator=':'): | ||
4712 | 22 | """Update the context with the relation data.""" | ||
4713 | 23 | # Add any relation data prefixed with the relation type. | ||
4714 | 24 | relation_type = charmhelpers.core.hookenv.relation_type() | ||
4715 | 25 | relations = [] | ||
4716 | 26 | context['current_relation'] = {} | ||
4717 | 27 | if relation_type is not None: | ||
4718 | 28 | relation_data = charmhelpers.core.hookenv.relation_get() | ||
4719 | 29 | context['current_relation'] = relation_data | ||
4720 | 30 | # Deprecated: the following use of relation data as keys | ||
4721 | 31 | # directly in the context will be removed. | ||
4722 | 32 | relation_data = dict( | ||
4723 | 33 | ("{relation_type}{namespace_separator}{key}".format( | ||
4724 | 34 | relation_type=relation_type, | ||
4725 | 35 | key=key, | ||
4726 | 36 | namespace_separator=namespace_separator), val) | ||
4727 | 37 | for key, val in relation_data.items()) | ||
4728 | 38 | relation_data = dict_keys_without_hyphens(relation_data) | ||
4729 | 39 | context.update(relation_data) | ||
4730 | 40 | relations = charmhelpers.core.hookenv.relations_of_type(relation_type) | ||
4731 | 41 | relations = [dict_keys_without_hyphens(rel) for rel in relations] | ||
4732 | 42 | |||
4733 | 43 | if 'relations_deprecated' not in context: | ||
4734 | 44 | context['relations_deprecated'] = {} | ||
4735 | 45 | if relation_type is not None: | ||
4736 | 46 | relation_type = relation_type.replace('-', '_') | ||
4737 | 47 | context['relations_deprecated'][relation_type] = relations | ||
4738 | 48 | |||
4739 | 49 | context['relations'] = charmhelpers.core.hookenv.relations() | ||
4740 | 50 | |||
4741 | 51 | |||
4742 | 52 | def juju_state_to_yaml(yaml_path, namespace_separator=':', | ||
4743 | 53 | allow_hyphens_in_keys=True): | ||
4744 | 54 | """Update the juju config and state in a yaml file. | ||
4745 | 55 | |||
4746 | 56 | This includes any current relation-get data, and the charm | ||
4747 | 57 | directory. | ||
4748 | 58 | |||
4749 | 59 | This function was created for the ansible and saltstack | ||
4750 | 60 | support, as those libraries can use a yaml file to supply | ||
4751 | 61 | context to templates, but it may be useful generally to | ||
4752 | 62 | create and update an on-disk cache of all the config, including | ||
4753 | 63 | previous relation data. | ||
4754 | 64 | |||
4755 | 65 | By default, hyphens are allowed in keys as this is supported | ||
4756 | 66 | by yaml, but for tools like ansible, hyphens are not valid [1]. | ||
4757 | 67 | |||
4758 | 68 | [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name | ||
4759 | 69 | """ | ||
4760 | 70 | config = charmhelpers.core.hookenv.config() | ||
4761 | 71 | |||
4762 | 72 | # Add the charm_dir which we will need to refer to charm | ||
4763 | 73 | # file resources etc. | ||
4764 | 74 | config['charm_dir'] = charm_dir | ||
4765 | 75 | config['local_unit'] = charmhelpers.core.hookenv.local_unit() | ||
4766 | 76 | config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip() | ||
4767 | 77 | config['unit_public_address'] = charmhelpers.core.hookenv.unit_get( | ||
4768 | 78 | 'public-address' | ||
4769 | 79 | ) | ||
4770 | 80 | |||
4771 | 81 | # Don't use non-standard tags for unicode which will not | ||
4772 | 82 | # work when salt uses yaml.load_safe. | ||
4773 | 83 | yaml.add_representer(unicode, lambda dumper, | ||
4774 | 84 | value: dumper.represent_scalar( | ||
4775 | 85 | u'tag:yaml.org,2002:str', value)) | ||
4776 | 86 | |||
4777 | 87 | yaml_dir = os.path.dirname(yaml_path) | ||
4778 | 88 | if not os.path.exists(yaml_dir): | ||
4779 | 89 | os.makedirs(yaml_dir) | ||
4780 | 90 | |||
4781 | 91 | if os.path.exists(yaml_path): | ||
4782 | 92 | with open(yaml_path, "r") as existing_vars_file: | ||
4783 | 93 | existing_vars = yaml.load(existing_vars_file.read()) | ||
4784 | 94 | else: | ||
4785 | 95 | existing_vars = {} | ||
4786 | 96 | |||
4787 | 97 | if not allow_hyphens_in_keys: | ||
4788 | 98 | config = dict_keys_without_hyphens(config) | ||
4789 | 99 | existing_vars.update(config) | ||
4790 | 100 | |||
4791 | 101 | update_relations(existing_vars, namespace_separator) | ||
4792 | 102 | |||
4793 | 103 | with open(yaml_path, "w+") as fp: | ||
4794 | 104 | fp.write(yaml.dump(existing_vars, default_flow_style=False)) | ||
4795 | 105 | 0 | ||
4796 | === removed file 'hooks/charmhelpers/contrib/templating/pyformat.py' | |||
4797 | --- hooks/charmhelpers/contrib/templating/pyformat.py 2013-11-26 17:12:54 +0000 | |||
4798 | +++ hooks/charmhelpers/contrib/templating/pyformat.py 1970-01-01 00:00:00 +0000 | |||
4799 | @@ -1,13 +0,0 @@ | |||
4800 | 1 | ''' | ||
4801 | 2 | Templating using standard Python str.format() method. | ||
4802 | 3 | ''' | ||
4803 | 4 | |||
4804 | 5 | from charmhelpers.core import hookenv | ||
4805 | 6 | |||
4806 | 7 | |||
4807 | 8 | def render(template, extra={}, **kwargs): | ||
4808 | 9 | """Return the template rendered using Python's str.format().""" | ||
4809 | 10 | context = hookenv.execution_environment() | ||
4810 | 11 | context.update(extra) | ||
4811 | 12 | context.update(kwargs) | ||
4812 | 13 | return template.format(**context) | ||
4813 | 14 | 0 | ||
4814 | === removed directory 'hooks/charmhelpers/contrib/unison' | |||
4815 | === removed file 'hooks/charmhelpers/contrib/unison/__init__.py' | |||
4816 | --- hooks/charmhelpers/contrib/unison/__init__.py 2014-05-09 20:11:59 +0000 | |||
4817 | +++ hooks/charmhelpers/contrib/unison/__init__.py 1970-01-01 00:00:00 +0000 | |||
4818 | @@ -1,257 +0,0 @@ | |||
4819 | 1 | # Easy file synchronization among peer units using ssh + unison. | ||
4820 | 2 | # | ||
4821 | 3 | # From *both* peer relation -joined and -changed, add a call to | ||
4822 | 4 | # ssh_authorized_peers() describing the peer relation and the desired | ||
4823 | 5 | # user + group. After all peer relations have settled, all hosts should | ||
4824 | 6 | # be able to connect to on another via key auth'd ssh as the specified user. | ||
4825 | 7 | # | ||
4826 | 8 | # Other hooks are then free to synchronize files and directories using | ||
4827 | 9 | # sync_to_peers(). | ||
4828 | 10 | # | ||
4829 | 11 | # For a peer relation named 'cluster', for example: | ||
4830 | 12 | # | ||
4831 | 13 | # cluster-relation-joined: | ||
4832 | 14 | # ... | ||
4833 | 15 | # ssh_authorized_peers(peer_interface='cluster', | ||
4834 | 16 | # user='juju_ssh', group='juju_ssh', | ||
4835 | 17 | # ensure_user=True) | ||
4836 | 18 | # ... | ||
4837 | 19 | # | ||
4838 | 20 | # cluster-relation-changed: | ||
4839 | 21 | # ... | ||
4840 | 22 | # ssh_authorized_peers(peer_interface='cluster', | ||
4841 | 23 | # user='juju_ssh', group='juju_ssh', | ||
4842 | 24 | # ensure_user=True) | ||
4843 | 25 | # ... | ||
4844 | 26 | # | ||
4845 | 27 | # Hooks are now free to sync files as easily as: | ||
4846 | 28 | # | ||
4847 | 29 | # files = ['/etc/fstab', '/etc/apt.conf.d/'] | ||
4848 | 30 | # sync_to_peers(peer_interface='cluster', | ||
4849 | 31 | # user='juju_ssh, paths=[files]) | ||
4850 | 32 | # | ||
4851 | 33 | # It is assumed the charm itself has setup permissions on each unit | ||
4852 | 34 | # such that 'juju_ssh' has read + write permissions. Also assumed | ||
4853 | 35 | # that the calling charm takes care of leader delegation. | ||
4854 | 36 | # | ||
4855 | 37 | # Additionally files can be synchronized only to an specific unit: | ||
4856 | 38 | # sync_to_peer(slave_address, user='juju_ssh', | ||
4857 | 39 | # paths=[files], verbose=False) | ||
4858 | 40 | |||
4859 | 41 | import os | ||
4860 | 42 | import pwd | ||
4861 | 43 | |||
4862 | 44 | from copy import copy | ||
4863 | 45 | from subprocess import check_call, check_output | ||
4864 | 46 | |||
4865 | 47 | from charmhelpers.core.host import ( | ||
4866 | 48 | adduser, | ||
4867 | 49 | add_user_to_group, | ||
4868 | 50 | ) | ||
4869 | 51 | |||
4870 | 52 | from charmhelpers.core.hookenv import ( | ||
4871 | 53 | log, | ||
4872 | 54 | hook_name, | ||
4873 | 55 | relation_ids, | ||
4874 | 56 | related_units, | ||
4875 | 57 | relation_set, | ||
4876 | 58 | relation_get, | ||
4877 | 59 | unit_private_ip, | ||
4878 | 60 | ERROR, | ||
4879 | 61 | ) | ||
4880 | 62 | |||
4881 | 63 | BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false', | ||
4882 | 64 | '-fastcheck=true', '-group=false', '-owner=false', | ||
4883 | 65 | '-prefer=newer', '-times=true'] | ||
4884 | 66 | |||
4885 | 67 | |||
4886 | 68 | def get_homedir(user): | ||
4887 | 69 | try: | ||
4888 | 70 | user = pwd.getpwnam(user) | ||
4889 | 71 | return user.pw_dir | ||
4890 | 72 | except KeyError: | ||
4891 | 73 | log('Could not get homedir for user %s: user exists?', ERROR) | ||
4892 | 74 | raise Exception | ||
4893 | 75 | |||
4894 | 76 | |||
4895 | 77 | def create_private_key(user, priv_key_path): | ||
4896 | 78 | if not os.path.isfile(priv_key_path): | ||
4897 | 79 | log('Generating new SSH key for user %s.' % user) | ||
4898 | 80 | cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', | ||
4899 | 81 | '-f', priv_key_path] | ||
4900 | 82 | check_call(cmd) | ||
4901 | 83 | else: | ||
4902 | 84 | log('SSH key already exists at %s.' % priv_key_path) | ||
4903 | 85 | check_call(['chown', user, priv_key_path]) | ||
4904 | 86 | check_call(['chmod', '0600', priv_key_path]) | ||
4905 | 87 | |||
4906 | 88 | |||
4907 | 89 | def create_public_key(user, priv_key_path, pub_key_path): | ||
4908 | 90 | if not os.path.isfile(pub_key_path): | ||
4909 | 91 | log('Generating missing ssh public key @ %s.' % pub_key_path) | ||
4910 | 92 | cmd = ['ssh-keygen', '-y', '-f', priv_key_path] | ||
4911 | 93 | p = check_output(cmd).strip() | ||
4912 | 94 | with open(pub_key_path, 'wb') as out: | ||
4913 | 95 | out.write(p) | ||
4914 | 96 | check_call(['chown', user, pub_key_path]) | ||
4915 | 97 | |||
4916 | 98 | |||
4917 | 99 | def get_keypair(user): | ||
4918 | 100 | home_dir = get_homedir(user) | ||
4919 | 101 | ssh_dir = os.path.join(home_dir, '.ssh') | ||
4920 | 102 | priv_key = os.path.join(ssh_dir, 'id_rsa') | ||
4921 | 103 | pub_key = '%s.pub' % priv_key | ||
4922 | 104 | |||
4923 | 105 | if not os.path.isdir(ssh_dir): | ||
4924 | 106 | os.mkdir(ssh_dir) | ||
4925 | 107 | check_call(['chown', '-R', user, ssh_dir]) | ||
4926 | 108 | |||
4927 | 109 | create_private_key(user, priv_key) | ||
4928 | 110 | create_public_key(user, priv_key, pub_key) | ||
4929 | 111 | |||
4930 | 112 | with open(priv_key, 'r') as p: | ||
4931 | 113 | _priv = p.read().strip() | ||
4932 | 114 | |||
4933 | 115 | with open(pub_key, 'r') as p: | ||
4934 | 116 | _pub = p.read().strip() | ||
4935 | 117 | |||
4936 | 118 | return (_priv, _pub) | ||
4937 | 119 | |||
4938 | 120 | |||
4939 | 121 | def write_authorized_keys(user, keys): | ||
4940 | 122 | home_dir = get_homedir(user) | ||
4941 | 123 | ssh_dir = os.path.join(home_dir, '.ssh') | ||
4942 | 124 | auth_keys = os.path.join(ssh_dir, 'authorized_keys') | ||
4943 | 125 | log('Syncing authorized_keys @ %s.' % auth_keys) | ||
4944 | 126 | with open(auth_keys, 'wb') as out: | ||
4945 | 127 | for k in keys: | ||
4946 | 128 | out.write('%s\n' % k) | ||
4947 | 129 | |||
4948 | 130 | |||
4949 | 131 | def write_known_hosts(user, hosts): | ||
4950 | 132 | home_dir = get_homedir(user) | ||
4951 | 133 | ssh_dir = os.path.join(home_dir, '.ssh') | ||
4952 | 134 | known_hosts = os.path.join(ssh_dir, 'known_hosts') | ||
4953 | 135 | khosts = [] | ||
4954 | 136 | for host in hosts: | ||
4955 | 137 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] | ||
4956 | 138 | remote_key = check_output(cmd).strip() | ||
4957 | 139 | khosts.append(remote_key) | ||
4958 | 140 | log('Syncing known_hosts @ %s.' % known_hosts) | ||
4959 | 141 | with open(known_hosts, 'wb') as out: | ||
4960 | 142 | for host in khosts: | ||
4961 | 143 | out.write('%s\n' % host) | ||
4962 | 144 | |||
4963 | 145 | |||
4964 | 146 | def ensure_user(user, group=None): | ||
4965 | 147 | adduser(user) | ||
4966 | 148 | if group: | ||
4967 | 149 | add_user_to_group(user, group) | ||
4968 | 150 | |||
4969 | 151 | |||
4970 | 152 | def ssh_authorized_peers(peer_interface, user, group=None, | ||
4971 | 153 | ensure_local_user=False): | ||
4972 | 154 | """ | ||
4973 | 155 | Main setup function, should be called from both peer -changed and -joined | ||
4974 | 156 | hooks with the same parameters. | ||
4975 | 157 | """ | ||
4976 | 158 | if ensure_local_user: | ||
4977 | 159 | ensure_user(user, group) | ||
4978 | 160 | priv_key, pub_key = get_keypair(user) | ||
4979 | 161 | hook = hook_name() | ||
4980 | 162 | if hook == '%s-relation-joined' % peer_interface: | ||
4981 | 163 | relation_set(ssh_pub_key=pub_key) | ||
4982 | 164 | elif hook == '%s-relation-changed' % peer_interface: | ||
4983 | 165 | hosts = [] | ||
4984 | 166 | keys = [] | ||
4985 | 167 | |||
4986 | 168 | for r_id in relation_ids(peer_interface): | ||
4987 | 169 | for unit in related_units(r_id): | ||
4988 | 170 | ssh_pub_key = relation_get('ssh_pub_key', | ||
4989 | 171 | rid=r_id, | ||
4990 | 172 | unit=unit) | ||
4991 | 173 | priv_addr = relation_get('private-address', | ||
4992 | 174 | rid=r_id, | ||
4993 | 175 | unit=unit) | ||
4994 | 176 | if ssh_pub_key: | ||
4995 | 177 | keys.append(ssh_pub_key) | ||
4996 | 178 | hosts.append(priv_addr) | ||
4997 | 179 | else: | ||
4998 | 180 | log('ssh_authorized_peers(): ssh_pub_key ' | ||
4999 | 181 | 'missing for unit %s, skipping.' % unit) | ||
5000 | 182 | write_authorized_keys(user, keys) |
The diff has been truncated for viewing.
+1 LGTM.