Merge lp:~lazypower/charms/trusty/mongodb/fixing_st00f into lp:charms/mongodb
- Trusty Tahr (14.04)
- fixing_st00f
- Merge into trunk
Proposed by
Charles Butler
Status: | Rejected |
---|---|
Rejected by: | Charles Butler |
Proposed branch: | lp:~lazypower/charms/trusty/mongodb/fixing_st00f |
Merge into: | lp:charms/mongodb |
Diff against target: |
1403 lines (+597/-274) 11 files modified
.bzrignore (+3/-0) Makefile (+9/-1) hooks/charmhelpers/core/fstab.py (+116/-0) hooks/charmhelpers/core/hookenv.py (+103/-5) hooks/charmhelpers/core/host.py (+38/-8) hooks/charmhelpers/fetch/__init__.py (+130/-81) hooks/charmhelpers/fetch/bzrurl.py (+2/-1) hooks/hooks.py (+148/-171) hooks/install (+0/-5) metadata.yaml (+5/-2) tests/200_relate_ceilometer.test (+43/-0) |
To merge this branch: | bzr merge lp:~lazypower/charms/trusty/mongodb/fixing_st00f |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Charles Butler (community) | Needs Fixing | ||
Benjamin Saller (community) | Needs Fixing | ||
Review via email: mp+228714@code.launchpad.net |
Commit message
Description of the change
Proposed resolution for MongoDB - refactors to use charmhelpers, adds a basic relation test to validate ceilometer client integration isn't broken, and various fixes relating to clustering.
This is a rework of patch 49 that was reverted.
To post a comment you must log in.
- 54. By Charles Butler
-
Backports Storage Subordinate work into the charmhelpers revision
- 55. By Charles Butler
-
Fixes typo in raise statement of test
- 56. By Charles Butler
-
Dirty merge cleanup
Revision history for this message
Charles Butler (lazypower) wrote : | # |
And just noticed this "dirty merge cleanup" leaves a ton of stuff around. Will revert, and re-try, again.
review:
Needs Fixing
Revision history for this message
Charles Butler (lazypower) wrote : | # |
Rejecting - this was an olllld merge that was bubbling up in the queue - and has been merged from a different, cleaner branch.
Revision history for this message
Marco Ceppi (marcoceppi) wrote : | # |
Thanks for removing from the queue.
Unmerged revisions
- 56. By Charles Butler
-
Dirty merge cleanup
- 55. By Charles Butler
-
Fixes typo in raise statement of test
- 54. By Charles Butler
-
Backports Storage Subordinate work into the charmhelpers revision
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' | |||
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 | |||
3 | +++ .bzrignore 2014-07-29 16:34:12 +0000 | |||
4 | @@ -0,0 +1,3 @@ | |||
5 | 1 | .git | ||
6 | 2 | bin/* | ||
7 | 3 | scripts/charm-helpers-sync.py | ||
8 | 0 | 4 | ||
9 | === modified file 'Makefile' | |||
10 | --- Makefile 2014-04-11 20:55:42 +0000 | |||
11 | +++ Makefile 2014-07-29 16:34:12 +0000 | |||
12 | @@ -13,9 +13,17 @@ | |||
13 | 13 | # You should have received a copy of the GNU Affero General Public License | 13 | # You should have received a copy of the GNU Affero General Public License |
14 | 14 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | 15 | 15 | ||
16 | 16 | PYTHON := /usr/bin/env python | ||
17 | 16 | 17 | ||
18 | 17 | unittest: | 18 | unittest: |
19 | 18 | tests/10-unit.test | 19 | tests/10-unit.test |
20 | 19 | 20 | ||
21 | 20 | sync: | 21 | sync: |
23 | 21 | @charm-helper-sync -c charm-helpers-sync.yaml | 22 | @mkdir -p bin |
24 | 23 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py > bin/charm_helpers_sync.py | ||
25 | 24 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml | ||
26 | 25 | |||
27 | 26 | clean: | ||
28 | 27 | @find . -name \*.pyc -delete | ||
29 | 28 | @find . -name '*.bak' -delete | ||
30 | 29 | |||
31 | 22 | 30 | ||
32 | === added file '__init__.py' | |||
33 | === added directory 'bin' | |||
34 | === added file 'hooks/charmhelpers/core/fstab.py' | |||
35 | --- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000 | |||
36 | +++ hooks/charmhelpers/core/fstab.py 2014-07-29 16:34:12 +0000 | |||
37 | @@ -0,0 +1,116 @@ | |||
38 | 1 | #!/usr/bin/env python | ||
39 | 2 | # -*- coding: utf-8 -*- | ||
40 | 3 | |||
41 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
42 | 5 | |||
43 | 6 | import os | ||
44 | 7 | |||
45 | 8 | |||
46 | 9 | class Fstab(file): | ||
47 | 10 | """This class extends file in order to implement a file reader/writer | ||
48 | 11 | for file `/etc/fstab` | ||
49 | 12 | """ | ||
50 | 13 | |||
51 | 14 | class Entry(object): | ||
52 | 15 | """Entry class represents a non-comment line on the `/etc/fstab` file | ||
53 | 16 | """ | ||
54 | 17 | def __init__(self, device, mountpoint, filesystem, | ||
55 | 18 | options, d=0, p=0): | ||
56 | 19 | self.device = device | ||
57 | 20 | self.mountpoint = mountpoint | ||
58 | 21 | self.filesystem = filesystem | ||
59 | 22 | |||
60 | 23 | if not options: | ||
61 | 24 | options = "defaults" | ||
62 | 25 | |||
63 | 26 | self.options = options | ||
64 | 27 | self.d = d | ||
65 | 28 | self.p = p | ||
66 | 29 | |||
67 | 30 | def __eq__(self, o): | ||
68 | 31 | return str(self) == str(o) | ||
69 | 32 | |||
70 | 33 | def __str__(self): | ||
71 | 34 | return "{} {} {} {} {} {}".format(self.device, | ||
72 | 35 | self.mountpoint, | ||
73 | 36 | self.filesystem, | ||
74 | 37 | self.options, | ||
75 | 38 | self.d, | ||
76 | 39 | self.p) | ||
77 | 40 | |||
78 | 41 | DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||
79 | 42 | |||
80 | 43 | def __init__(self, path=None): | ||
81 | 44 | if path: | ||
82 | 45 | self._path = path | ||
83 | 46 | else: | ||
84 | 47 | self._path = self.DEFAULT_PATH | ||
85 | 48 | file.__init__(self, self._path, 'r+') | ||
86 | 49 | |||
87 | 50 | def _hydrate_entry(self, line): | ||
88 | 51 | # NOTE: use split with no arguments to split on any | ||
89 | 52 | # whitespace including tabs | ||
90 | 53 | return Fstab.Entry(*filter( | ||
91 | 54 | lambda x: x not in ('', None), | ||
92 | 55 | line.strip("\n").split())) | ||
93 | 56 | |||
94 | 57 | @property | ||
95 | 58 | def entries(self): | ||
96 | 59 | self.seek(0) | ||
97 | 60 | for line in self.readlines(): | ||
98 | 61 | try: | ||
99 | 62 | if not line.startswith("#"): | ||
100 | 63 | yield self._hydrate_entry(line) | ||
101 | 64 | except ValueError: | ||
102 | 65 | pass | ||
103 | 66 | |||
104 | 67 | def get_entry_by_attr(self, attr, value): | ||
105 | 68 | for entry in self.entries: | ||
106 | 69 | e_attr = getattr(entry, attr) | ||
107 | 70 | if e_attr == value: | ||
108 | 71 | return entry | ||
109 | 72 | return None | ||
110 | 73 | |||
111 | 74 | def add_entry(self, entry): | ||
112 | 75 | if self.get_entry_by_attr('device', entry.device): | ||
113 | 76 | return False | ||
114 | 77 | |||
115 | 78 | self.write(str(entry) + '\n') | ||
116 | 79 | self.truncate() | ||
117 | 80 | return entry | ||
118 | 81 | |||
119 | 82 | def remove_entry(self, entry): | ||
120 | 83 | self.seek(0) | ||
121 | 84 | |||
122 | 85 | lines = self.readlines() | ||
123 | 86 | |||
124 | 87 | found = False | ||
125 | 88 | for index, line in enumerate(lines): | ||
126 | 89 | if not line.startswith("#"): | ||
127 | 90 | if self._hydrate_entry(line) == entry: | ||
128 | 91 | found = True | ||
129 | 92 | break | ||
130 | 93 | |||
131 | 94 | if not found: | ||
132 | 95 | return False | ||
133 | 96 | |||
134 | 97 | lines.remove(line) | ||
135 | 98 | |||
136 | 99 | self.seek(0) | ||
137 | 100 | self.write(''.join(lines)) | ||
138 | 101 | self.truncate() | ||
139 | 102 | return True | ||
140 | 103 | |||
141 | 104 | @classmethod | ||
142 | 105 | def remove_by_mountpoint(cls, mountpoint, path=None): | ||
143 | 106 | fstab = cls(path=path) | ||
144 | 107 | entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||
145 | 108 | if entry: | ||
146 | 109 | return fstab.remove_entry(entry) | ||
147 | 110 | return False | ||
148 | 111 | |||
149 | 112 | @classmethod | ||
150 | 113 | def add(cls, device, mountpoint, filesystem, options=None, path=None): | ||
151 | 114 | return cls(path=path).add_entry(Fstab.Entry(device, | ||
152 | 115 | mountpoint, filesystem, | ||
153 | 116 | options=options)) | ||
154 | 0 | 117 | ||
155 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
156 | --- hooks/charmhelpers/core/hookenv.py 2014-04-11 20:55:42 +0000 | |||
157 | +++ hooks/charmhelpers/core/hookenv.py 2014-07-29 16:34:12 +0000 | |||
158 | @@ -25,7 +25,7 @@ | |||
159 | 25 | def cached(func): | 25 | def cached(func): |
160 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
161 | 27 | 27 | ||
163 | 28 | For example: | 28 | For example:: |
164 | 29 | 29 | ||
165 | 30 | @cached | 30 | @cached |
166 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
167 | @@ -155,6 +155,100 @@ | |||
168 | 155 | return os.path.basename(sys.argv[0]) | 155 | return os.path.basename(sys.argv[0]) |
169 | 156 | 156 | ||
170 | 157 | 157 | ||
171 | 158 | class Config(dict): | ||
172 | 159 | """A Juju charm config dictionary that can write itself to | ||
173 | 160 | disk (as json) and track which values have changed since | ||
174 | 161 | the previous hook invocation. | ||
175 | 162 | |||
176 | 163 | Do not instantiate this object directly - instead call | ||
177 | 164 | ``hookenv.config()`` | ||
178 | 165 | |||
179 | 166 | Example usage:: | ||
180 | 167 | |||
181 | 168 | >>> # inside a hook | ||
182 | 169 | >>> from charmhelpers.core import hookenv | ||
183 | 170 | >>> config = hookenv.config() | ||
184 | 171 | >>> config['foo'] | ||
185 | 172 | 'bar' | ||
186 | 173 | >>> config['mykey'] = 'myval' | ||
187 | 174 | >>> config.save() | ||
188 | 175 | |||
189 | 176 | |||
190 | 177 | >>> # user runs `juju set mycharm foo=baz` | ||
191 | 178 | >>> # now we're inside subsequent config-changed hook | ||
192 | 179 | >>> config = hookenv.config() | ||
193 | 180 | >>> config['foo'] | ||
194 | 181 | 'baz' | ||
195 | 182 | >>> # test to see if this val has changed since last hook | ||
196 | 183 | >>> config.changed('foo') | ||
197 | 184 | True | ||
198 | 185 | >>> # what was the previous value? | ||
199 | 186 | >>> config.previous('foo') | ||
200 | 187 | 'bar' | ||
201 | 188 | >>> # keys/values that we add are preserved across hooks | ||
202 | 189 | >>> config['mykey'] | ||
203 | 190 | 'myval' | ||
204 | 191 | >>> # don't forget to save at the end of hook! | ||
205 | 192 | >>> config.save() | ||
206 | 193 | |||
207 | 194 | """ | ||
208 | 195 | CONFIG_FILE_NAME = '.juju-persistent-config' | ||
209 | 196 | |||
210 | 197 | def __init__(self, *args, **kw): | ||
211 | 198 | super(Config, self).__init__(*args, **kw) | ||
212 | 199 | self._prev_dict = None | ||
213 | 200 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | ||
214 | 201 | if os.path.exists(self.path): | ||
215 | 202 | self.load_previous() | ||
216 | 203 | |||
217 | 204 | def load_previous(self, path=None): | ||
218 | 205 | """Load previous copy of config from disk so that current values | ||
219 | 206 | can be compared to previous values. | ||
220 | 207 | |||
221 | 208 | :param path: | ||
222 | 209 | |||
223 | 210 | File path from which to load the previous config. If `None`, | ||
224 | 211 | config is loaded from the default location. If `path` is | ||
225 | 212 | specified, subsequent `save()` calls will write to the same | ||
226 | 213 | path. | ||
227 | 214 | |||
228 | 215 | """ | ||
229 | 216 | self.path = path or self.path | ||
230 | 217 | with open(self.path) as f: | ||
231 | 218 | self._prev_dict = json.load(f) | ||
232 | 219 | |||
233 | 220 | def changed(self, key): | ||
234 | 221 | """Return true if the value for this key has changed since | ||
235 | 222 | the last save. | ||
236 | 223 | |||
237 | 224 | """ | ||
238 | 225 | if self._prev_dict is None: | ||
239 | 226 | return True | ||
240 | 227 | return self.previous(key) != self.get(key) | ||
241 | 228 | |||
242 | 229 | def previous(self, key): | ||
243 | 230 | """Return previous value for this key, or None if there | ||
244 | 231 | is no "previous" value. | ||
245 | 232 | |||
246 | 233 | """ | ||
247 | 234 | if self._prev_dict: | ||
248 | 235 | return self._prev_dict.get(key) | ||
249 | 236 | return None | ||
250 | 237 | |||
251 | 238 | def save(self): | ||
252 | 239 | """Save this config to disk. | ||
253 | 240 | |||
254 | 241 | Preserves items in _prev_dict that do not exist in self. | ||
255 | 242 | |||
256 | 243 | """ | ||
257 | 244 | if self._prev_dict: | ||
258 | 245 | for k, v in self._prev_dict.iteritems(): | ||
259 | 246 | if k not in self: | ||
260 | 247 | self[k] = v | ||
261 | 248 | with open(self.path, 'w') as f: | ||
262 | 249 | json.dump(self, f) | ||
263 | 250 | |||
264 | 251 | |||
265 | 158 | @cached | 252 | @cached |
266 | 159 | def config(scope=None): | 253 | def config(scope=None): |
267 | 160 | """Juju charm configuration""" | 254 | """Juju charm configuration""" |
268 | @@ -163,7 +257,10 @@ | |||
269 | 163 | config_cmd_line.append(scope) | 257 | config_cmd_line.append(scope) |
270 | 164 | config_cmd_line.append('--format=json') | 258 | config_cmd_line.append('--format=json') |
271 | 165 | try: | 259 | try: |
273 | 166 | return json.loads(subprocess.check_output(config_cmd_line)) | 260 | config_data = json.loads(subprocess.check_output(config_cmd_line)) |
274 | 261 | if scope is not None: | ||
275 | 262 | return config_data | ||
276 | 263 | return Config(config_data) | ||
277 | 167 | except ValueError: | 264 | except ValueError: |
278 | 168 | return None | 265 | return None |
279 | 169 | 266 | ||
280 | @@ -348,18 +445,19 @@ | |||
281 | 348 | class Hooks(object): | 445 | class Hooks(object): |
282 | 349 | """A convenient handler for hook functions. | 446 | """A convenient handler for hook functions. |
283 | 350 | 447 | ||
285 | 351 | Example: | 448 | Example:: |
286 | 449 | |||
287 | 352 | hooks = Hooks() | 450 | hooks = Hooks() |
288 | 353 | 451 | ||
289 | 354 | # register a hook, taking its name from the function name | 452 | # register a hook, taking its name from the function name |
290 | 355 | @hooks.hook() | 453 | @hooks.hook() |
291 | 356 | def install(): | 454 | def install(): |
293 | 357 | ... | 455 | pass # your code here |
294 | 358 | 456 | ||
295 | 359 | # register a hook, providing a custom hook name | 457 | # register a hook, providing a custom hook name |
296 | 360 | @hooks.hook("config-changed") | 458 | @hooks.hook("config-changed") |
297 | 361 | def config_changed(): | 459 | def config_changed(): |
299 | 362 | ... | 460 | pass # your code here |
300 | 363 | 461 | ||
301 | 364 | if __name__ == "__main__": | 462 | if __name__ == "__main__": |
302 | 365 | # execute a hook based on the name the program is called by | 463 | # execute a hook based on the name the program is called by |
303 | 366 | 464 | ||
304 | === modified file 'hooks/charmhelpers/core/host.py' | |||
305 | --- hooks/charmhelpers/core/host.py 2014-04-11 20:55:42 +0000 | |||
306 | +++ hooks/charmhelpers/core/host.py 2014-07-29 16:34:12 +0000 | |||
307 | @@ -16,6 +16,7 @@ | |||
308 | 16 | from collections import OrderedDict | 16 | from collections import OrderedDict |
309 | 17 | 17 | ||
310 | 18 | from hookenv import log | 18 | from hookenv import log |
311 | 19 | from fstab import Fstab | ||
312 | 19 | 20 | ||
313 | 20 | 21 | ||
314 | 21 | def service_start(service_name): | 22 | def service_start(service_name): |
315 | @@ -34,7 +35,8 @@ | |||
316 | 34 | 35 | ||
317 | 35 | 36 | ||
318 | 36 | def service_reload(service_name, restart_on_failure=False): | 37 | def service_reload(service_name, restart_on_failure=False): |
320 | 37 | """Reload a system service, optionally falling back to restart if reload fails""" | 38 | """Reload a system service, optionally falling back to restart if |
321 | 39 | reload fails""" | ||
322 | 38 | service_result = service('reload', service_name) | 40 | service_result = service('reload', service_name) |
323 | 39 | if not service_result and restart_on_failure: | 41 | if not service_result and restart_on_failure: |
324 | 40 | service_result = service('restart', service_name) | 42 | service_result = service('restart', service_name) |
325 | @@ -143,7 +145,19 @@ | |||
326 | 143 | target.write(content) | 145 | target.write(content) |
327 | 144 | 146 | ||
328 | 145 | 147 | ||
330 | 146 | def mount(device, mountpoint, options=None, persist=False): | 148 | def fstab_remove(mp): |
331 | 149 | """Remove the given mountpoint entry from /etc/fstab | ||
332 | 150 | """ | ||
333 | 151 | return Fstab.remove_by_mountpoint(mp) | ||
334 | 152 | |||
335 | 153 | |||
336 | 154 | def fstab_add(dev, mp, fs, options=None): | ||
337 | 155 | """Adds the given device entry to the /etc/fstab file | ||
338 | 156 | """ | ||
339 | 157 | return Fstab.add(dev, mp, fs, options=options) | ||
340 | 158 | |||
341 | 159 | |||
342 | 160 | def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): | ||
343 | 147 | """Mount a filesystem at a particular mountpoint""" | 161 | """Mount a filesystem at a particular mountpoint""" |
344 | 148 | cmd_args = ['mount'] | 162 | cmd_args = ['mount'] |
345 | 149 | if options is not None: | 163 | if options is not None: |
346 | @@ -154,9 +168,9 @@ | |||
347 | 154 | except subprocess.CalledProcessError, e: | 168 | except subprocess.CalledProcessError, e: |
348 | 155 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 169 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
349 | 156 | return False | 170 | return False |
350 | 171 | |||
351 | 157 | if persist: | 172 | if persist: |
354 | 158 | # TODO: update fstab | 173 | return fstab_add(device, mountpoint, filesystem, options=options) |
353 | 159 | pass | ||
355 | 160 | return True | 174 | return True |
356 | 161 | 175 | ||
357 | 162 | 176 | ||
358 | @@ -168,9 +182,9 @@ | |||
359 | 168 | except subprocess.CalledProcessError, e: | 182 | except subprocess.CalledProcessError, e: |
360 | 169 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 183 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
361 | 170 | return False | 184 | return False |
362 | 185 | |||
363 | 171 | if persist: | 186 | if persist: |
366 | 172 | # TODO: update fstab | 187 | return fstab_remove(mountpoint) |
365 | 173 | pass | ||
367 | 174 | return True | 188 | return True |
368 | 175 | 189 | ||
369 | 176 | 190 | ||
370 | @@ -197,13 +211,13 @@ | |||
371 | 197 | def restart_on_change(restart_map, stopstart=False): | 211 | def restart_on_change(restart_map, stopstart=False): |
372 | 198 | """Restart services based on configuration files changing | 212 | """Restart services based on configuration files changing |
373 | 199 | 213 | ||
375 | 200 | This function is used a decorator, for example | 214 | This function is used a decorator, for example:: |
376 | 201 | 215 | ||
377 | 202 | @restart_on_change({ | 216 | @restart_on_change({ |
378 | 203 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
379 | 204 | }) | 218 | }) |
380 | 205 | def ceph_client_changed(): | 219 | def ceph_client_changed(): |
382 | 206 | ... | 220 | pass # your code here |
383 | 207 | 221 | ||
384 | 208 | In this example, the cinder-api and cinder-volume services | 222 | In this example, the cinder-api and cinder-volume services |
385 | 209 | would be restarted if /etc/ceph/ceph.conf is changed by the | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the |
386 | @@ -295,3 +309,19 @@ | |||
387 | 295 | if 'link/ether' in words: | 309 | if 'link/ether' in words: |
388 | 296 | hwaddr = words[words.index('link/ether') + 1] | 310 | hwaddr = words[words.index('link/ether') + 1] |
389 | 297 | return hwaddr | 311 | return hwaddr |
390 | 312 | |||
391 | 313 | |||
392 | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): | ||
393 | 315 | '''Compare supplied revno with the revno of the installed package | ||
394 | 316 | |||
395 | 317 | * 1 => Installed revno is greater than supplied arg | ||
396 | 318 | * 0 => Installed revno is the same as supplied arg | ||
397 | 319 | * -1 => Installed revno is less than supplied arg | ||
398 | 320 | |||
399 | 321 | ''' | ||
400 | 322 | import apt_pkg | ||
401 | 323 | if not pkgcache: | ||
402 | 324 | apt_pkg.init() | ||
403 | 325 | pkgcache = apt_pkg.Cache() | ||
404 | 326 | pkg = pkgcache[package] | ||
405 | 327 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | ||
406 | 298 | 328 | ||
407 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
408 | --- hooks/charmhelpers/fetch/__init__.py 2014-04-11 20:55:42 +0000 | |||
409 | +++ hooks/charmhelpers/fetch/__init__.py 2014-07-29 16:34:12 +0000 | |||
410 | @@ -1,4 +1,5 @@ | |||
411 | 1 | import importlib | 1 | import importlib |
412 | 2 | import time | ||
413 | 2 | from yaml import safe_load | 3 | from yaml import safe_load |
414 | 3 | from charmhelpers.core.host import ( | 4 | from charmhelpers.core.host import ( |
415 | 4 | lsb_release | 5 | lsb_release |
416 | @@ -12,9 +13,9 @@ | |||
417 | 12 | config, | 13 | config, |
418 | 13 | log, | 14 | log, |
419 | 14 | ) | 15 | ) |
420 | 15 | import apt_pkg | ||
421 | 16 | import os | 16 | import os |
422 | 17 | 17 | ||
423 | 18 | |||
424 | 18 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive | 19 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
425 | 19 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | 20 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
426 | 20 | """ | 21 | """ |
427 | @@ -54,12 +55,74 @@ | |||
428 | 54 | 'icehouse/proposed': 'precise-proposed/icehouse', | 55 | 'icehouse/proposed': 'precise-proposed/icehouse', |
429 | 55 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', | 56 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', |
430 | 56 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', | 57 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
431 | 58 | # Juno | ||
432 | 59 | 'juno': 'trusty-updates/juno', | ||
433 | 60 | 'trusty-juno': 'trusty-updates/juno', | ||
434 | 61 | 'trusty-juno/updates': 'trusty-updates/juno', | ||
435 | 62 | 'trusty-updates/juno': 'trusty-updates/juno', | ||
436 | 63 | 'juno/proposed': 'trusty-proposed/juno', | ||
437 | 64 | 'juno/proposed': 'trusty-proposed/juno', | ||
438 | 65 | 'trusty-juno/proposed': 'trusty-proposed/juno', | ||
439 | 66 | 'trusty-proposed/juno': 'trusty-proposed/juno', | ||
440 | 57 | } | 67 | } |
441 | 58 | 68 | ||
442 | 69 | # The order of this list is very important. Handlers should be listed in from | ||
443 | 70 | # least- to most-specific URL matching. | ||
444 | 71 | FETCH_HANDLERS = ( | ||
445 | 72 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | ||
446 | 73 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | ||
447 | 74 | ) | ||
448 | 75 | |||
449 | 76 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. | ||
450 | 77 | APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. | ||
451 | 78 | APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. | ||
452 | 79 | |||
453 | 80 | |||
454 | 81 | class SourceConfigError(Exception): | ||
455 | 82 | pass | ||
456 | 83 | |||
457 | 84 | |||
458 | 85 | class UnhandledSource(Exception): | ||
459 | 86 | pass | ||
460 | 87 | |||
461 | 88 | |||
462 | 89 | class AptLockError(Exception): | ||
463 | 90 | pass | ||
464 | 91 | |||
465 | 92 | |||
466 | 93 | class BaseFetchHandler(object): | ||
467 | 94 | |||
468 | 95 | """Base class for FetchHandler implementations in fetch plugins""" | ||
469 | 96 | |||
470 | 97 | def can_handle(self, source): | ||
471 | 98 | """Returns True if the source can be handled. Otherwise returns | ||
472 | 99 | a string explaining why it cannot""" | ||
473 | 100 | return "Wrong source type" | ||
474 | 101 | |||
475 | 102 | def install(self, source): | ||
476 | 103 | """Try to download and unpack the source. Return the path to the | ||
477 | 104 | unpacked files or raise UnhandledSource.""" | ||
478 | 105 | raise UnhandledSource("Wrong source type {}".format(source)) | ||
479 | 106 | |||
480 | 107 | def parse_url(self, url): | ||
481 | 108 | return urlparse(url) | ||
482 | 109 | |||
483 | 110 | def base_url(self, url): | ||
484 | 111 | """Return url without querystring or fragment""" | ||
485 | 112 | parts = list(self.parse_url(url)) | ||
486 | 113 | parts[4:] = ['' for i in parts[4:]] | ||
487 | 114 | return urlunparse(parts) | ||
488 | 115 | |||
489 | 59 | 116 | ||
490 | 60 | def filter_installed_packages(packages): | 117 | def filter_installed_packages(packages): |
491 | 61 | """Returns a list of packages that require installation""" | 118 | """Returns a list of packages that require installation""" |
492 | 119 | import apt_pkg | ||
493 | 62 | apt_pkg.init() | 120 | apt_pkg.init() |
494 | 121 | |||
495 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | ||
496 | 123 | # another process is already building the cache). | ||
497 | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
498 | 125 | |||
499 | 63 | cache = apt_pkg.Cache() | 126 | cache = apt_pkg.Cache() |
500 | 64 | _pkgs = [] | 127 | _pkgs = [] |
501 | 65 | for package in packages: | 128 | for package in packages: |
502 | @@ -87,14 +150,7 @@ | |||
503 | 87 | cmd.extend(packages) | 150 | cmd.extend(packages) |
504 | 88 | log("Installing {} with options: {}".format(packages, | 151 | log("Installing {} with options: {}".format(packages, |
505 | 89 | options)) | 152 | options)) |
514 | 90 | env = os.environ.copy() | 153 | _run_apt_command(cmd, fatal) |
507 | 91 | if 'DEBIAN_FRONTEND' not in env: | ||
508 | 92 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
509 | 93 | |||
510 | 94 | if fatal: | ||
511 | 95 | subprocess.check_call(cmd, env=env) | ||
512 | 96 | else: | ||
513 | 97 | subprocess.call(cmd, env=env) | ||
515 | 98 | 154 | ||
516 | 99 | 155 | ||
517 | 100 | def apt_upgrade(options=None, fatal=False, dist=False): | 156 | def apt_upgrade(options=None, fatal=False, dist=False): |
518 | @@ -109,24 +165,13 @@ | |||
519 | 109 | else: | 165 | else: |
520 | 110 | cmd.append('upgrade') | 166 | cmd.append('upgrade') |
521 | 111 | log("Upgrading with options: {}".format(options)) | 167 | log("Upgrading with options: {}".format(options)) |
531 | 112 | 168 | _run_apt_command(cmd, fatal) | |
523 | 113 | env = os.environ.copy() | ||
524 | 114 | if 'DEBIAN_FRONTEND' not in env: | ||
525 | 115 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
526 | 116 | |||
527 | 117 | if fatal: | ||
528 | 118 | subprocess.check_call(cmd, env=env) | ||
529 | 119 | else: | ||
530 | 120 | subprocess.call(cmd, env=env) | ||
532 | 121 | 169 | ||
533 | 122 | 170 | ||
534 | 123 | def apt_update(fatal=False): | 171 | def apt_update(fatal=False): |
535 | 124 | """Update local apt cache""" | 172 | """Update local apt cache""" |
536 | 125 | cmd = ['apt-get', 'update'] | 173 | cmd = ['apt-get', 'update'] |
541 | 126 | if fatal: | 174 | _run_apt_command(cmd, fatal) |
538 | 127 | subprocess.check_call(cmd) | ||
539 | 128 | else: | ||
540 | 129 | subprocess.call(cmd) | ||
542 | 130 | 175 | ||
543 | 131 | 176 | ||
544 | 132 | def apt_purge(packages, fatal=False): | 177 | def apt_purge(packages, fatal=False): |
545 | @@ -137,10 +182,7 @@ | |||
546 | 137 | else: | 182 | else: |
547 | 138 | cmd.extend(packages) | 183 | cmd.extend(packages) |
548 | 139 | log("Purging {}".format(packages)) | 184 | log("Purging {}".format(packages)) |
553 | 140 | if fatal: | 185 | _run_apt_command(cmd, fatal) |
550 | 141 | subprocess.check_call(cmd) | ||
551 | 142 | else: | ||
552 | 143 | subprocess.call(cmd) | ||
554 | 144 | 186 | ||
555 | 145 | 187 | ||
556 | 146 | def apt_hold(packages, fatal=False): | 188 | def apt_hold(packages, fatal=False): |
557 | @@ -151,6 +193,7 @@ | |||
558 | 151 | else: | 193 | else: |
559 | 152 | cmd.extend(packages) | 194 | cmd.extend(packages) |
560 | 153 | log("Holding {}".format(packages)) | 195 | log("Holding {}".format(packages)) |
561 | 196 | |||
562 | 154 | if fatal: | 197 | if fatal: |
563 | 155 | subprocess.check_call(cmd) | 198 | subprocess.check_call(cmd) |
564 | 156 | else: | 199 | else: |
565 | @@ -184,57 +227,50 @@ | |||
566 | 184 | apt.write(PROPOSED_POCKET.format(release)) | 227 | apt.write(PROPOSED_POCKET.format(release)) |
567 | 185 | if key: | 228 | if key: |
568 | 186 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | 229 | subprocess.check_call(['apt-key', 'adv', '--keyserver', |
570 | 187 | 'keyserver.ubuntu.com', '--recv', | 230 | 'hkp://keyserver.ubuntu.com:80', '--recv', |
571 | 188 | key]) | 231 | key]) |
572 | 189 | 232 | ||
573 | 190 | 233 | ||
574 | 191 | class SourceConfigError(Exception): | ||
575 | 192 | pass | ||
576 | 193 | |||
577 | 194 | |||
578 | 195 | def configure_sources(update=False, | 234 | def configure_sources(update=False, |
579 | 196 | sources_var='install_sources', | 235 | sources_var='install_sources', |
580 | 197 | keys_var='install_keys'): | 236 | keys_var='install_keys'): |
581 | 198 | """ | 237 | """ |
583 | 199 | Configure multiple sources from charm configuration | 238 | Configure multiple sources from charm configuration. |
584 | 239 | |||
585 | 240 | The lists are encoded as yaml fragments in the configuration. | ||
586 | 241 | The frament needs to be included as a string. | ||
587 | 200 | 242 | ||
588 | 201 | Example config: | 243 | Example config: |
590 | 202 | install_sources: | 244 | install_sources: | |
591 | 203 | - "ppa:foo" | 245 | - "ppa:foo" |
592 | 204 | - "http://example.com/repo precise main" | 246 | - "http://example.com/repo precise main" |
594 | 205 | install_keys: | 247 | install_keys: | |
595 | 206 | - null | 248 | - null |
596 | 207 | - "a1b2c3d4" | 249 | - "a1b2c3d4" |
597 | 208 | 250 | ||
598 | 209 | Note that 'null' (a.k.a. None) should not be quoted. | 251 | Note that 'null' (a.k.a. None) should not be quoted. |
599 | 210 | """ | 252 | """ |
607 | 211 | sources = safe_load(config(sources_var)) | 253 | sources = safe_load((config(sources_var) or '').strip()) or [] |
608 | 212 | keys = config(keys_var) | 254 | keys = safe_load((config(keys_var) or '').strip()) or None |
609 | 213 | if keys is not None: | 255 | |
610 | 214 | keys = safe_load(keys) | 256 | if isinstance(sources, basestring): |
611 | 215 | if isinstance(sources, basestring) and ( | 257 | sources = [sources] |
612 | 216 | keys is None or isinstance(keys, basestring)): | 258 | |
613 | 217 | add_source(sources, keys) | 259 | if keys is None: |
614 | 260 | for source in sources: | ||
615 | 261 | add_source(source, None) | ||
616 | 218 | else: | 262 | else: |
622 | 219 | if not len(sources) == len(keys): | 263 | if isinstance(keys, basestring): |
623 | 220 | msg = 'Install sources and keys lists are different lengths' | 264 | keys = [keys] |
624 | 221 | raise SourceConfigError(msg) | 265 | |
625 | 222 | for src_num in range(len(sources)): | 266 | if len(sources) != len(keys): |
626 | 223 | add_source(sources[src_num], keys[src_num]) | 267 | raise SourceConfigError( |
627 | 268 | 'Install sources and keys lists are different lengths') | ||
628 | 269 | for source, key in zip(sources, keys): | ||
629 | 270 | add_source(source, key) | ||
630 | 224 | if update: | 271 | if update: |
631 | 225 | apt_update(fatal=True) | 272 | apt_update(fatal=True) |
632 | 226 | 273 | ||
633 | 227 | # The order of this list is very important. Handlers should be listed in from | ||
634 | 228 | # least- to most-specific URL matching. | ||
635 | 229 | FETCH_HANDLERS = ( | ||
636 | 230 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | ||
637 | 231 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | ||
638 | 232 | ) | ||
639 | 233 | |||
640 | 234 | |||
641 | 235 | class UnhandledSource(Exception): | ||
642 | 236 | pass | ||
643 | 237 | |||
644 | 238 | 274 | ||
645 | 239 | def install_remote(source): | 275 | def install_remote(source): |
646 | 240 | """ | 276 | """ |
647 | @@ -265,30 +301,6 @@ | |||
648 | 265 | return install_remote(source) | 301 | return install_remote(source) |
649 | 266 | 302 | ||
650 | 267 | 303 | ||
651 | 268 | class BaseFetchHandler(object): | ||
652 | 269 | |||
653 | 270 | """Base class for FetchHandler implementations in fetch plugins""" | ||
654 | 271 | |||
655 | 272 | def can_handle(self, source): | ||
656 | 273 | """Returns True if the source can be handled. Otherwise returns | ||
657 | 274 | a string explaining why it cannot""" | ||
658 | 275 | return "Wrong source type" | ||
659 | 276 | |||
660 | 277 | def install(self, source): | ||
661 | 278 | """Try to download and unpack the source. Return the path to the | ||
662 | 279 | unpacked files or raise UnhandledSource.""" | ||
663 | 280 | raise UnhandledSource("Wrong source type {}".format(source)) | ||
664 | 281 | |||
665 | 282 | def parse_url(self, url): | ||
666 | 283 | return urlparse(url) | ||
667 | 284 | |||
668 | 285 | def base_url(self, url): | ||
669 | 286 | """Return url without querystring or fragment""" | ||
670 | 287 | parts = list(self.parse_url(url)) | ||
671 | 288 | parts[4:] = ['' for i in parts[4:]] | ||
672 | 289 | return urlunparse(parts) | ||
673 | 290 | |||
674 | 291 | |||
675 | 292 | def plugins(fetch_handlers=None): | 304 | def plugins(fetch_handlers=None): |
676 | 293 | if not fetch_handlers: | 305 | if not fetch_handlers: |
677 | 294 | fetch_handlers = FETCH_HANDLERS | 306 | fetch_handlers = FETCH_HANDLERS |
678 | @@ -306,3 +318,40 @@ | |||
679 | 306 | log("FetchHandler {} not found, skipping plugin".format( | 318 | log("FetchHandler {} not found, skipping plugin".format( |
680 | 307 | handler_name)) | 319 | handler_name)) |
681 | 308 | return plugin_list | 320 | return plugin_list |
682 | 321 | |||
683 | 322 | |||
684 | 323 | def _run_apt_command(cmd, fatal=False): | ||
685 | 324 | """ | ||
686 | 325 | Run an APT command, checking output and retrying if the fatal flag is set | ||
687 | 326 | to True. | ||
688 | 327 | |||
689 | 328 | :param: cmd: str: The apt command to run. | ||
690 | 329 | :param: fatal: bool: Whether the command's output should be checked and | ||
691 | 330 | retried. | ||
692 | 331 | """ | ||
693 | 332 | env = os.environ.copy() | ||
694 | 333 | |||
695 | 334 | if 'DEBIAN_FRONTEND' not in env: | ||
696 | 335 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
697 | 336 | |||
698 | 337 | if fatal: | ||
699 | 338 | retry_count = 0 | ||
700 | 339 | result = None | ||
701 | 340 | |||
702 | 341 | # If the command is considered "fatal", we need to retry if the apt | ||
703 | 342 | # lock was not acquired. | ||
704 | 343 | |||
705 | 344 | while result is None or result == APT_NO_LOCK: | ||
706 | 345 | try: | ||
707 | 346 | result = subprocess.check_call(cmd, env=env) | ||
708 | 347 | except subprocess.CalledProcessError, e: | ||
709 | 348 | retry_count = retry_count + 1 | ||
710 | 349 | if retry_count > APT_NO_LOCK_RETRY_COUNT: | ||
711 | 350 | raise | ||
712 | 351 | result = e.returncode | ||
713 | 352 | log("Couldn't acquire DPKG lock. Will retry in {} seconds." | ||
714 | 353 | "".format(APT_NO_LOCK_RETRY_DELAY)) | ||
715 | 354 | time.sleep(APT_NO_LOCK_RETRY_DELAY) | ||
716 | 355 | |||
717 | 356 | else: | ||
718 | 357 | subprocess.call(cmd, env=env) | ||
719 | 309 | 358 | ||
720 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
721 | --- hooks/charmhelpers/fetch/bzrurl.py 2014-04-11 20:55:42 +0000 | |||
722 | +++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-29 16:34:12 +0000 | |||
723 | @@ -39,7 +39,8 @@ | |||
724 | 39 | def install(self, source): | 39 | def install(self, source): |
725 | 40 | url_parts = self.parse_url(source) | 40 | url_parts = self.parse_url(source) |
726 | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] |
728 | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
729 | 43 | branch_name) | ||
730 | 43 | if not os.path.exists(dest_dir): | 44 | if not os.path.exists(dest_dir): |
731 | 44 | mkdir(dest_dir, perms=0755) | 45 | mkdir(dest_dir, perms=0755) |
732 | 45 | try: | 46 | try: |
733 | 46 | 47 | ||
734 | === modified file 'hooks/hooks.py' | |||
735 | --- hooks/hooks.py 2014-07-29 10:22:55 +0000 | |||
736 | +++ hooks/hooks.py 2014-07-29 16:34:12 +0000 | |||
737 | @@ -6,7 +6,6 @@ | |||
738 | 6 | ''' | 6 | ''' |
739 | 7 | 7 | ||
740 | 8 | import commands | 8 | import commands |
741 | 9 | import json | ||
742 | 10 | import os | 9 | import os |
743 | 11 | import re | 10 | import re |
744 | 12 | import signal | 11 | import signal |
745 | @@ -15,7 +14,6 @@ | |||
746 | 15 | import sys | 14 | import sys |
747 | 16 | import time | 15 | import time |
748 | 17 | import yaml | 16 | import yaml |
749 | 18 | import argparse | ||
750 | 19 | 17 | ||
751 | 20 | from os import chmod | 18 | from os import chmod |
752 | 21 | from os import remove | 19 | from os import remove |
753 | @@ -29,10 +27,26 @@ | |||
754 | 29 | apt_update, | 27 | apt_update, |
755 | 30 | apt_install | 28 | apt_install |
756 | 31 | ) | 29 | ) |
757 | 30 | |||
758 | 32 | from charmhelpers.core.hookenv import ( | 31 | from charmhelpers.core.hookenv import ( |
762 | 33 | config | 32 | config, |
763 | 34 | ) | 33 | unit_get, |
764 | 35 | 34 | relation_get, | |
765 | 35 | relation_set, | ||
766 | 36 | relations_of_type, | ||
767 | 37 | relation_id, | ||
768 | 38 | open_port, | ||
769 | 39 | close_port, | ||
770 | 40 | Hooks, | ||
771 | 41 | ) | ||
772 | 42 | |||
773 | 43 | from charmhelpers.core.hookenv import log as juju_log | ||
774 | 44 | |||
775 | 45 | from charmhelpers.core.host import ( | ||
776 | 46 | service, | ||
777 | 47 | ) | ||
778 | 48 | |||
779 | 49 | hooks = Hooks() | ||
780 | 36 | 50 | ||
781 | 37 | ############################################################################### | 51 | ############################################################################### |
782 | 38 | # Global variables | 52 | # Global variables |
783 | @@ -40,8 +54,8 @@ | |||
784 | 40 | default_mongodb_config = "/etc/mongodb.conf" | 54 | default_mongodb_config = "/etc/mongodb.conf" |
785 | 41 | default_mongodb_init_config = "/etc/init/mongodb.conf" | 55 | default_mongodb_init_config = "/etc/init/mongodb.conf" |
786 | 42 | default_mongos_list = "/etc/mongos.list" | 56 | default_mongos_list = "/etc/mongos.list" |
789 | 43 | default_wait_for = 20 | 57 | default_wait_for = 10 |
790 | 44 | default_max_tries = 20 | 58 | default_max_tries = 5 |
791 | 45 | 59 | ||
792 | 46 | ############################################################################### | 60 | ############################################################################### |
793 | 47 | # Supporting functions | 61 | # Supporting functions |
794 | @@ -494,7 +508,7 @@ | |||
795 | 494 | config.append("") | 508 | config.append("") |
796 | 495 | 509 | ||
797 | 496 | # arbiter | 510 | # arbiter |
799 | 497 | if config_data['arbiter'] != "disabled" and\ | 511 | if config_data['arbiter'] != "disabled" and \ |
800 | 498 | config_data['arbiter'] != "enabled": | 512 | config_data['arbiter'] != "enabled": |
801 | 499 | config.append("arbiter = %s" % config_data['arbiter']) | 513 | config.append("arbiter = %s" % config_data['arbiter']) |
802 | 500 | config.append("") | 514 | config.append("") |
803 | @@ -657,7 +671,7 @@ | |||
804 | 657 | 671 | ||
805 | 658 | 672 | ||
806 | 659 | def configsvr_status(wait_for=default_wait_for, max_tries=default_max_tries): | 673 | def configsvr_status(wait_for=default_wait_for, max_tries=default_max_tries): |
808 | 660 | config_data = config_get() | 674 | config_data = config() |
809 | 661 | current_try = 0 | 675 | current_try = 0 |
810 | 662 | while (process_check_pidfile('/var/run/mongodb/configsvr.pid') != | 676 | while (process_check_pidfile('/var/run/mongodb/configsvr.pid') != |
811 | 663 | (None, None)) and not port_check( | 677 | (None, None)) and not port_check( |
812 | @@ -685,7 +699,7 @@ | |||
813 | 685 | juju_log("disable_configsvr: port not defined.") | 699 | juju_log("disable_configsvr: port not defined.") |
814 | 686 | return(False) | 700 | return(False) |
815 | 687 | try: | 701 | try: |
817 | 688 | config_server_port = config_get('config_server_port') | 702 | config_server_port = config('config_server_port') |
818 | 689 | pid = open('/var/run/mongodb/configsvr.pid').read() | 703 | pid = open('/var/run/mongodb/configsvr.pid').read() |
819 | 690 | os.kill(int(pid), signal.SIGTERM) | 704 | os.kill(int(pid), signal.SIGTERM) |
820 | 691 | os.unlink('/var/run/mongodb/configsvr.pid') | 705 | os.unlink('/var/run/mongodb/configsvr.pid') |
821 | @@ -747,7 +761,7 @@ | |||
822 | 747 | 761 | ||
823 | 748 | 762 | ||
824 | 749 | def mongos_status(wait_for=default_wait_for, max_tries=default_max_tries): | 763 | def mongos_status(wait_for=default_wait_for, max_tries=default_max_tries): |
826 | 750 | config_data = config_get() | 764 | config_data = config() |
827 | 751 | current_try = 0 | 765 | current_try = 0 |
828 | 752 | while (process_check_pidfile('/var/run/mongodb/mongos.pid') != | 766 | while (process_check_pidfile('/var/run/mongodb/mongos.pid') != |
829 | 753 | (None, None)) and not port_check( | 767 | (None, None)) and not port_check( |
830 | @@ -839,17 +853,17 @@ | |||
831 | 839 | 853 | ||
832 | 840 | def restart_mongod(wait_for=default_wait_for, max_tries=default_max_tries): | 854 | def restart_mongod(wait_for=default_wait_for, max_tries=default_max_tries): |
833 | 841 | my_hostname = unit_get('public-address') | 855 | my_hostname = unit_get('public-address') |
835 | 842 | my_port = config_get('port') | 856 | my_port = config('port') |
836 | 843 | current_try = 0 | 857 | current_try = 0 |
837 | 844 | 858 | ||
839 | 845 | service('mongodb', 'stop') | 859 | service('stop', 'mongodb') |
840 | 846 | if os.path.exists('/var/lib/mongodb/mongod.lock'): | 860 | if os.path.exists('/var/lib/mongodb/mongod.lock'): |
841 | 847 | os.remove('/var/lib/mongodb/mongod.lock') | 861 | os.remove('/var/lib/mongodb/mongod.lock') |
842 | 848 | 862 | ||
844 | 849 | if not service('mongodb', 'start'): | 863 | if not service('start', 'mongodb'): |
845 | 850 | return False | 864 | return False |
846 | 851 | 865 | ||
848 | 852 | while (service('mongodb', 'status') and | 866 | while (service('status', 'mongodb') and |
849 | 853 | not port_check(my_hostname, my_port) and | 867 | not port_check(my_hostname, my_port) and |
850 | 854 | current_try < max_tries): | 868 | current_try < max_tries): |
851 | 855 | juju_log( | 869 | juju_log( |
852 | @@ -859,14 +873,14 @@ | |||
853 | 859 | current_try += 1 | 873 | current_try += 1 |
854 | 860 | 874 | ||
855 | 861 | return( | 875 | return( |
857 | 862 | (service('mongodb', 'status') == port_check(my_hostname, my_port)) | 876 | (service('status', 'mongodb') == port_check(my_hostname, my_port)) |
858 | 863 | is True) | 877 | is True) |
859 | 864 | 878 | ||
860 | 865 | 879 | ||
861 | 866 | def backup_cronjob(disable=False): | 880 | def backup_cronjob(disable=False): |
862 | 867 | """Generate the cronjob to backup with mongodbump.""" | 881 | """Generate the cronjob to backup with mongodbump.""" |
863 | 868 | juju_log('Setting up cronjob') | 882 | juju_log('Setting up cronjob') |
865 | 869 | config_data = config_get() | 883 | config_data = config() |
866 | 870 | backupdir = config_data['backup_directory'] | 884 | backupdir = config_data['backup_directory'] |
867 | 871 | bind_ip = config_data['bind_ip'] | 885 | bind_ip = config_data['bind_ip'] |
868 | 872 | cron_file = '/etc/cron.d/mongodb' | 886 | cron_file = '/etc/cron.d/mongodb' |
869 | @@ -915,18 +929,19 @@ | |||
870 | 915 | ############################################################################### | 929 | ############################################################################### |
871 | 916 | # Hook functions | 930 | # Hook functions |
872 | 917 | ############################################################################### | 931 | ############################################################################### |
873 | 932 | @hooks.hook('install') | ||
874 | 918 | def install_hook(): | 933 | def install_hook(): |
875 | 919 | juju_log("Installing mongodb") | 934 | juju_log("Installing mongodb") |
876 | 920 | add_source(config('source'), config('key')) | 935 | add_source(config('source'), config('key')) |
877 | 921 | apt_update(fatal=True) | 936 | apt_update(fatal=True) |
878 | 922 | apt_install(packages=['mongodb', 'python-yaml'], fatal=True) | 937 | apt_install(packages=['mongodb', 'python-yaml'], fatal=True) |
882 | 923 | return True | 938 | |
883 | 924 | 939 | ||
884 | 925 | 940 | @hooks.hook('config-changed') | |
885 | 926 | def config_changed(): | 941 | def config_changed(): |
886 | 927 | juju_log("Entering config_changed") | 942 | juju_log("Entering config_changed") |
887 | 928 | print "Entering config_changed" | 943 | print "Entering config_changed" |
889 | 929 | config_data = config_get() | 944 | config_data = config() |
890 | 930 | print "config_data: ", config_data | 945 | print "config_data: ", config_data |
891 | 931 | mongodb_config = open(default_mongodb_config).read() | 946 | mongodb_config = open(default_mongodb_config).read() |
892 | 932 | 947 | ||
893 | @@ -1048,7 +1063,7 @@ | |||
894 | 1048 | juju_log("config_changed: Exceptions: %s" % str(e)) | 1063 | juju_log("config_changed: Exceptions: %s" % str(e)) |
895 | 1049 | 1064 | ||
896 | 1050 | if mongos_pid is not None: | 1065 | if mongos_pid is not None: |
898 | 1051 | mongos_port = re.search('--port (\w+)', mongos_cmd_line).group(2) | 1066 | mongos_port = re.search('--port (\w+)', mongos_cmd_line).group(1) |
899 | 1052 | disable_mongos(mongos_port) | 1067 | disable_mongos(mongos_port) |
900 | 1053 | enable_mongos(config_data['mongos_port']) | 1068 | enable_mongos(config_data['mongos_port']) |
901 | 1054 | else: | 1069 | else: |
902 | @@ -1058,6 +1073,7 @@ | |||
903 | 1058 | return(True) | 1073 | return(True) |
904 | 1059 | 1074 | ||
905 | 1060 | 1075 | ||
906 | 1076 | @hooks.hook('start') | ||
907 | 1061 | def start_hook(): | 1077 | def start_hook(): |
908 | 1062 | juju_log("start_hook") | 1078 | juju_log("start_hook") |
909 | 1063 | retVal = restart_mongod() | 1079 | retVal = restart_mongod() |
910 | @@ -1065,10 +1081,11 @@ | |||
911 | 1065 | return(retVal) | 1081 | return(retVal) |
912 | 1066 | 1082 | ||
913 | 1067 | 1083 | ||
914 | 1084 | @hooks.hook('stop') | ||
915 | 1068 | def stop_hook(): | 1085 | def stop_hook(): |
916 | 1069 | juju_log("stop_hook") | 1086 | juju_log("stop_hook") |
917 | 1070 | try: | 1087 | try: |
919 | 1071 | retVal = service('mongodb', 'stop') | 1088 | retVal = service('stop', 'mongodb') |
920 | 1072 | os.remove('/var/lib/mongodb/mongod.lock') | 1089 | os.remove('/var/lib/mongodb/mongod.lock') |
921 | 1073 | #FIXME Need to check if this is still needed | 1090 | #FIXME Need to check if this is still needed |
922 | 1074 | except Exception, e: | 1091 | except Exception, e: |
923 | @@ -1079,15 +1096,16 @@ | |||
924 | 1079 | return(retVal) | 1096 | return(retVal) |
925 | 1080 | 1097 | ||
926 | 1081 | 1098 | ||
927 | 1099 | @hooks.hook('database-relation-joined') | ||
928 | 1082 | def database_relation_joined(): | 1100 | def database_relation_joined(): |
929 | 1083 | juju_log("database_relation_joined") | 1101 | juju_log("database_relation_joined") |
930 | 1084 | my_hostname = unit_get('public-address') | 1102 | my_hostname = unit_get('public-address') |
933 | 1085 | my_port = config_get('port') | 1103 | my_port = config('port') |
934 | 1086 | my_replset = config_get('replicaset') | 1104 | my_replset = config('replicaset') |
935 | 1087 | juju_log("my_hostname: %s" % my_hostname) | 1105 | juju_log("my_hostname: %s" % my_hostname) |
936 | 1088 | juju_log("my_port: %s" % my_port) | 1106 | juju_log("my_port: %s" % my_port) |
937 | 1089 | juju_log("my_replset: %s" % my_replset) | 1107 | juju_log("my_replset: %s" % my_replset) |
939 | 1090 | return(relation_set( | 1108 | return(relation_set(relation_id(), |
940 | 1091 | { | 1109 | { |
941 | 1092 | 'hostname': my_hostname, | 1110 | 'hostname': my_hostname, |
942 | 1093 | 'port': my_port, | 1111 | 'port': my_port, |
943 | @@ -1096,34 +1114,36 @@ | |||
944 | 1096 | })) | 1114 | })) |
945 | 1097 | 1115 | ||
946 | 1098 | 1116 | ||
947 | 1117 | @hooks.hook('replicaset-relation-joined') | ||
948 | 1099 | def replica_set_relation_joined(): | 1118 | def replica_set_relation_joined(): |
949 | 1100 | juju_log("replica_set_relation_joined") | 1119 | juju_log("replica_set_relation_joined") |
950 | 1101 | my_hostname = unit_get('public-address') | 1120 | my_hostname = unit_get('public-address') |
953 | 1102 | my_port = config_get('port') | 1121 | my_port = config('port') |
954 | 1103 | my_replset = config_get('replicaset') | 1122 | my_replset = config('replicaset') |
955 | 1104 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] | 1123 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] |
956 | 1105 | juju_log("my_hostname: %s" % my_hostname) | 1124 | juju_log("my_hostname: %s" % my_hostname) |
957 | 1106 | juju_log("my_port: %s" % my_port) | 1125 | juju_log("my_port: %s" % my_port) |
958 | 1107 | juju_log("my_replset: %s" % my_replset) | 1126 | juju_log("my_replset: %s" % my_replset) |
959 | 1108 | juju_log("my_install_order: %s" % my_install_order) | 1127 | juju_log("my_install_order: %s" % my_install_order) |
972 | 1109 | return(enable_replset(my_replset) == | 1128 | enable_replset(my_replset) |
973 | 1110 | restart_mongod() == | 1129 | restart_mongod() |
974 | 1111 | relation_set( | 1130 | |
975 | 1112 | { | 1131 | relation_set(relation_id(), { |
976 | 1113 | 'hostname': my_hostname, | 1132 | 'hostname': my_hostname, |
977 | 1114 | 'port': my_port, | 1133 | 'port': my_port, |
978 | 1115 | 'replset': my_replset, | 1134 | 'replset': my_replset, |
979 | 1116 | 'install-order': my_install_order, | 1135 | 'install-order': my_install_order, |
980 | 1117 | 'type': 'replset', | 1136 | 'type': 'replset', |
981 | 1118 | })) | 1137 | }) |
982 | 1119 | 1138 | ||
983 | 1120 | 1139 | ||
984 | 1140 | @hooks.hook('replicaset-relation-changed') | ||
985 | 1121 | def replica_set_relation_changed(): | 1141 | def replica_set_relation_changed(): |
986 | 1122 | juju_log("replica_set_relation_changed") | 1142 | juju_log("replica_set_relation_changed") |
987 | 1123 | my_hostname = unit_get('public-address') | 1143 | my_hostname = unit_get('public-address') |
989 | 1124 | my_port = config_get('port') | 1144 | my_port = config('port') |
990 | 1125 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] | 1145 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] |
992 | 1126 | my_replicaset_master = config_get('replicaset_master') | 1146 | my_replicaset_master = config('replicaset_master') |
993 | 1127 | 1147 | ||
994 | 1128 | # If we are joining an existing replicaset cluster, just join and leave. | 1148 | # If we are joining an existing replicaset cluster, just join and leave. |
995 | 1129 | if my_replicaset_master != "auto": | 1149 | if my_replicaset_master != "auto": |
996 | @@ -1135,44 +1155,46 @@ | |||
997 | 1135 | master_install_order = my_install_order | 1155 | master_install_order = my_install_order |
998 | 1136 | 1156 | ||
999 | 1137 | # Check the nodes in the relation to find the master | 1157 | # Check the nodes in the relation to find the master |
1001 | 1138 | for member in relation_list(): | 1158 | for member in relations_of_type('replica-set'): |
1002 | 1159 | member = member['__unit__'] | ||
1003 | 1139 | juju_log("replica_set_relation_changed: member: %s" % member) | 1160 | juju_log("replica_set_relation_changed: member: %s" % member) |
1004 | 1140 | hostname = relation_get('hostname', member) | 1161 | hostname = relation_get('hostname', member) |
1005 | 1141 | port = relation_get('port', member) | 1162 | port = relation_get('port', member) |
1010 | 1142 | install_order = relation_get('install-order', member) | 1163 | inst_ordr = relation_get('install-order', member) |
1011 | 1143 | juju_log("replica_set_relation_changed: install_order: %s" % install_order) | 1164 | juju_log("replica_set_relation_changed: install_order: %s" % inst_ordr) |
1012 | 1144 | if install_order is None: | 1165 | if inst_ordr is None: |
1013 | 1145 | juju_log("replica_set_relation_changed: install_order is None. relation is not ready") | 1166 | juju_log("replica_set_relation_changed: install_order is None." |
1014 | 1167 | " relation is not ready") | ||
1015 | 1146 | break | 1168 | break |
1017 | 1147 | if int(install_order) < int(master_install_order): | 1169 | if int(inst_ordr) < int(master_install_order): |
1018 | 1148 | master_hostname = hostname | 1170 | master_hostname = hostname |
1019 | 1149 | master_port = port | 1171 | master_port = port |
1021 | 1150 | master_install_order = install_order | 1172 | master_install_order = inst_ordr |
1022 | 1151 | 1173 | ||
1023 | 1152 | # Initiate the replset | 1174 | # Initiate the replset |
1024 | 1153 | init_replset("%s:%s" % (master_hostname, master_port)) | 1175 | init_replset("%s:%s" % (master_hostname, master_port)) |
1025 | 1154 | 1176 | ||
1026 | 1155 | # Add the rest of the nodes to the replset | 1177 | # Add the rest of the nodes to the replset |
1030 | 1156 | for member in relation_list(): | 1178 | for member in relations_of_type('replica-set'): |
1031 | 1157 | hostname = relation_get('hostname', member) | 1179 | hostname = relation_get('hostname', member['__unit__']) |
1032 | 1158 | port = relation_get('port', member) | 1180 | port = relation_get('port', member['__unit__']) |
1033 | 1159 | if master_hostname != hostname: | 1181 | if master_hostname != hostname: |
1034 | 1160 | if hostname == my_hostname: | 1182 | if hostname == my_hostname: |
1038 | 1161 | subprocess.call(['mongo', | 1183 | subprocess.call(['mongo', '--eval', |
1039 | 1162 | '--eval', | 1184 | "rs.add(\"%s\")" % hostname]) |
1037 | 1163 | "rs.add(\"%s\")" % hostname]) | ||
1040 | 1164 | else: | 1185 | else: |
1041 | 1165 | join_replset("%s:%s" % (master_hostname, master_port), | 1186 | join_replset("%s:%s" % (master_hostname, master_port), |
1043 | 1166 | "%s:%s" % (hostname, port)) | 1187 | "%s:%s" % (hostname, port)) |
1044 | 1167 | 1188 | ||
1045 | 1168 | # Add this node to the replset ( if needed ) | 1189 | # Add this node to the replset ( if needed ) |
1046 | 1169 | if master_hostname != my_hostname: | 1190 | if master_hostname != my_hostname: |
1047 | 1170 | join_replset("%s:%s" % (master_hostname, master_port), | 1191 | join_replset("%s:%s" % (master_hostname, master_port), |
1053 | 1171 | "%s:%s" % (my_hostname, my_port)) | 1192 | "%s:%s" % (my_hostname, my_port)) |
1054 | 1172 | 1193 | ||
1055 | 1173 | return(True) | 1194 | |
1056 | 1174 | 1195 | ||
1057 | 1175 | 1196 | ||
1058 | 1197 | @hooks.hook('data-relation-joined') | ||
1059 | 1176 | def data_relation_joined(): | 1198 | def data_relation_joined(): |
1060 | 1177 | juju_log("data_relation_joined") | 1199 | juju_log("data_relation_joined") |
1061 | 1178 | 1200 | ||
1062 | @@ -1182,6 +1204,7 @@ | |||
1063 | 1182 | })) | 1204 | })) |
1064 | 1183 | 1205 | ||
1065 | 1184 | 1206 | ||
1066 | 1207 | @hooks.hook('data-relation-changed') | ||
1067 | 1185 | def data_relation_changed(): | 1208 | def data_relation_changed(): |
1068 | 1186 | juju_log("data_relation_changed") | 1209 | juju_log("data_relation_changed") |
1069 | 1187 | 1210 | ||
1070 | @@ -1192,57 +1215,60 @@ | |||
1071 | 1192 | return(config_changed()) | 1215 | return(config_changed()) |
1072 | 1193 | 1216 | ||
1073 | 1194 | 1217 | ||
1074 | 1218 | @hooks.hook('data-relation-departed') | ||
1075 | 1195 | def data_relation_departed(): | 1219 | def data_relation_departed(): |
1076 | 1196 | juju_log("data_relation_departed") | 1220 | juju_log("data_relation_departed") |
1077 | 1197 | return(config_changed()) | 1221 | return(config_changed()) |
1078 | 1198 | 1222 | ||
1080 | 1199 | 1223 | @hooks.hook('configsvr-relation-joined') | |
1081 | 1200 | def configsvr_relation_joined(): | 1224 | def configsvr_relation_joined(): |
1082 | 1201 | juju_log("configsvr_relation_joined") | 1225 | juju_log("configsvr_relation_joined") |
1083 | 1202 | my_hostname = unit_get('public-address') | 1226 | my_hostname = unit_get('public-address') |
1085 | 1203 | my_port = config_get('config_server_port') | 1227 | my_port = config('config_server_port') |
1086 | 1204 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] | 1228 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] |
1096 | 1205 | return(relation_set( | 1229 | return(relation_set(relation_id(), |
1097 | 1206 | { | 1230 | { |
1098 | 1207 | 'hostname': my_hostname, | 1231 | 'hostname': my_hostname, |
1099 | 1208 | 'port': my_port, | 1232 | 'port': my_port, |
1100 | 1209 | 'install-order': my_install_order, | 1233 | 'install-order': my_install_order, |
1101 | 1210 | 'type': 'configsvr', | 1234 | 'type': 'configsvr', |
1102 | 1211 | })) | 1235 | })) |
1103 | 1212 | 1236 | ||
1104 | 1213 | 1237 | ||
1105 | 1238 | @hooks.hook('configsvr-relation-changed') | ||
1106 | 1214 | def configsvr_relation_changed(): | 1239 | def configsvr_relation_changed(): |
1107 | 1215 | juju_log("configsvr_relation_changed") | 1240 | juju_log("configsvr_relation_changed") |
1109 | 1216 | config_data = config_get() | 1241 | config_data = config() |
1110 | 1217 | my_port = config_data['config_server_port'] | 1242 | my_port = config_data['config_server_port'] |
1111 | 1218 | disable_configsvr(my_port) | 1243 | disable_configsvr(my_port) |
1117 | 1219 | retVal = enable_configsvr(config_data) | 1244 | |
1118 | 1220 | juju_log("configsvr_relation_changed returns: %s" % retVal) | 1245 | |
1119 | 1221 | return(retVal) | 1246 | @hooks.hook('mongos-cfg-relation-joined') |
1120 | 1222 | 1247 | @hooks.hook('mongos-relation-joined') | |
1116 | 1223 | |||
1121 | 1224 | def mongos_relation_joined(): | 1248 | def mongos_relation_joined(): |
1122 | 1225 | juju_log("mongos_relation_joined") | 1249 | juju_log("mongos_relation_joined") |
1123 | 1226 | my_hostname = unit_get('public-address') | 1250 | my_hostname = unit_get('public-address') |
1125 | 1227 | my_port = config_get('mongos_port') | 1251 | my_port = config('mongos_port') |
1126 | 1228 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] | 1252 | my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1] |
1136 | 1229 | return(relation_set( | 1253 | relation_set(relation_id(), |
1137 | 1230 | { | 1254 | { |
1138 | 1231 | 'hostname': my_hostname, | 1255 | 'hostname': my_hostname, |
1139 | 1232 | 'port': my_port, | 1256 | 'port': my_port, |
1140 | 1233 | 'install-order': my_install_order, | 1257 | 'install-order': my_install_order, |
1141 | 1234 | 'type': 'mongos' | 1258 | 'type': 'mongos' |
1142 | 1235 | })) | 1259 | }) |
1143 | 1236 | 1260 | ||
1144 | 1237 | 1261 | ||
1145 | 1262 | @hooks.hook('mongos-cfg-relation-changed') | ||
1146 | 1263 | @hooks.hook('mongos-relation-changed') | ||
1147 | 1238 | def mongos_relation_changed(): | 1264 | def mongos_relation_changed(): |
1148 | 1239 | juju_log("mongos_relation_changed") | 1265 | juju_log("mongos_relation_changed") |
1150 | 1240 | config_data = config_get() | 1266 | config_data = config() |
1151 | 1241 | retVal = False | 1267 | retVal = False |
1156 | 1242 | for member in relation_list(): | 1268 | for member in relations_of_type('mongos-cfg'): |
1157 | 1243 | hostname = relation_get('hostname', member) | 1269 | hostname = relation_get('hostname', member['__unit__']) |
1158 | 1244 | port = relation_get('port', member) | 1270 | port = relation_get('port', member['__unit__']) |
1159 | 1245 | rel_type = relation_get('type', member) | 1271 | rel_type = relation_get('type', member['__unit__']) |
1160 | 1246 | if hostname is None or port is None or rel_type is None: | 1272 | if hostname is None or port is None or rel_type is None: |
1161 | 1247 | juju_log("mongos_relation_changed: relation data not ready.") | 1273 | juju_log("mongos_relation_changed: relation data not ready.") |
1162 | 1248 | break | 1274 | break |
1163 | @@ -1264,34 +1290,33 @@ | |||
1164 | 1264 | if mongos_ready(): | 1290 | if mongos_ready(): |
1165 | 1265 | mongos_host = "%s:%s" % ( | 1291 | mongos_host = "%s:%s" % ( |
1166 | 1266 | unit_get('public-address'), | 1292 | unit_get('public-address'), |
1168 | 1267 | config_get('mongos_port')) | 1293 | config('mongos_port')) |
1169 | 1268 | shard_command1 = "sh.addShard(\"%s:%s\")" % (hostname, port) | 1294 | shard_command1 = "sh.addShard(\"%s:%s\")" % (hostname, port) |
1171 | 1269 | retVal1 = mongo_client(mongos_host, shard_command1) | 1295 | mongo_client(mongos_host, shard_command1) |
1172 | 1270 | replicaset = relation_get('replset', member) | 1296 | replicaset = relation_get('replset', member) |
1174 | 1271 | shard_command2 = "sh.addShard(\"%s/%s:%s\")" % \ | 1297 | shard_command2 = "sh.addShard(\"%s/%s:%s\")" % \ |
1175 | 1272 | (replicaset, hostname, port) | 1298 | (replicaset, hostname, port) |
1181 | 1273 | retVal2 = mongo_client(mongos_host, shard_command2) | 1299 | mongo_client(mongos_host, shard_command2) |
1182 | 1274 | retVal = retVal1 is True and retVal2 is True | 1300 | |
1183 | 1275 | else: | 1301 | |
1179 | 1276 | juju_log("Not enough config server for mongos yet.") | ||
1180 | 1277 | retVal = True | ||
1184 | 1278 | else: | 1302 | else: |
1185 | 1279 | juju_log("mongos_relation_change: undefined rel_type: %s" % | 1303 | juju_log("mongos_relation_change: undefined rel_type: %s" % |
1187 | 1280 | rel_type) | 1304 | rel_type) |
1188 | 1281 | return(False) | 1305 | return(False) |
1189 | 1282 | juju_log("mongos_relation_changed returns: %s" % retVal) | 1306 | juju_log("mongos_relation_changed returns: %s" % retVal) |
1193 | 1283 | return(retVal) | 1307 | |
1194 | 1284 | 1308 | ||
1195 | 1285 | 1309 | ||
1196 | 1310 | @hooks.hook('mongos-relation-broken') | ||
1197 | 1286 | def mongos_relation_broken(): | 1311 | def mongos_relation_broken(): |
1206 | 1287 | # config_servers = load_config_servers(default_mongos_list) | 1312 | config_servers = load_config_servers(default_mongos_list) |
1207 | 1288 | # for member in relation_list(): | 1313 | for member in relations_of_type('mongos'): |
1208 | 1289 | # hostname = relation_get('hostname', member) | 1314 | hostname = relation_get('hostname', member) |
1209 | 1290 | # port = relation_get('port', member) | 1315 | port = relation_get('port', member) |
1210 | 1291 | # if '%s:%s' % (hostname, port) in config_servers: | 1316 | if '%s:%s' % (hostname, port) in config_servers: |
1211 | 1292 | # config_servers.remove('%s:%s' % (hostname, port)) | 1317 | config_servers.remove('%s:%s' % (hostname, port)) |
1212 | 1293 | # return(update_file(default_mongos_list, '\n'.join(config_servers))) | 1318 | |
1213 | 1294 | return(True) | 1319 | update_file(default_mongos_list, '\n'.join(config_servers)) |
1214 | 1295 | 1320 | ||
1215 | 1296 | 1321 | ||
1216 | 1297 | def run(command, exit_on_error=True): | 1322 | def run(command, exit_on_error=True): |
1217 | @@ -1318,7 +1343,7 @@ | |||
1218 | 1318 | # | 1343 | # |
1219 | 1319 | #------------------------------ | 1344 | #------------------------------ |
1220 | 1320 | def volume_get_volid_from_volume_map(): | 1345 | def volume_get_volid_from_volume_map(): |
1222 | 1321 | config_data = config_get() | 1346 | config_data = config() |
1223 | 1322 | volume_map = {} | 1347 | volume_map = {} |
1224 | 1323 | try: | 1348 | try: |
1225 | 1324 | volume_map = yaml.load(config_data['volume-map'].strip()) | 1349 | volume_map = yaml.load(config_data['volume-map'].strip()) |
1226 | @@ -1379,19 +1404,21 @@ | |||
1227 | 1379 | # None config state is invalid - we should not serve | 1404 | # None config state is invalid - we should not serve |
1228 | 1380 | def volume_get_volume_id(): | 1405 | def volume_get_volume_id(): |
1229 | 1381 | 1406 | ||
1230 | 1407 | config_data = config() | ||
1231 | 1408 | |||
1232 | 1409 | |||
1233 | 1382 | 1410 | ||
1234 | 1383 | volid = volume_get_id_for_storage_subordinate() | 1411 | volid = volume_get_id_for_storage_subordinate() |
1235 | 1384 | if volid: | 1412 | if volid: |
1236 | 1385 | return volid | 1413 | return volid |
1237 | 1386 | 1414 | ||
1238 | 1387 | config_data = config_get() | ||
1239 | 1388 | ephemeral_storage = config_data['volume-ephemeral-storage'] | 1415 | ephemeral_storage = config_data['volume-ephemeral-storage'] |
1240 | 1389 | volid = volume_get_volid_from_volume_map() | 1416 | volid = volume_get_volid_from_volume_map() |
1241 | 1390 | juju_unit_name = os.environ['JUJU_UNIT_NAME'] | 1417 | juju_unit_name = os.environ['JUJU_UNIT_NAME'] |
1242 | 1391 | if ephemeral_storage in [True, 'yes', 'Yes', 'true', 'True']: | 1418 | if ephemeral_storage in [True, 'yes', 'Yes', 'true', 'True']: |
1243 | 1392 | if volid: | 1419 | if volid: |
1244 | 1393 | juju_log( | 1420 | juju_log( |
1246 | 1394 | "volume-ephemeral-storage is True, but " + | 1421 | "volume-ephemeral-storage is True, but |
1247 | 1395 | "volume-map[{!r}] -> {}".format(juju_unit_name, volid)) | 1422 | "volume-map[{!r}] -> {}".format(juju_unit_name, volid)) |
1248 | 1396 | return None | 1423 | return None |
1249 | 1397 | else: | 1424 | else: |
1250 | @@ -1424,6 +1451,7 @@ | |||
1251 | 1424 | return None | 1451 | return None |
1252 | 1425 | return output | 1452 | return output |
1253 | 1426 | 1453 | ||
1254 | 1454 | |||
1255 | 1427 | #------------------------------------------------------------------------------ | 1455 | #------------------------------------------------------------------------------ |
1256 | 1428 | # Core logic for permanent storage changes: | 1456 | # Core logic for permanent storage changes: |
1257 | 1429 | # NOTE the only 2 "True" return points: | 1457 | # NOTE the only 2 "True" return points: |
1258 | @@ -1435,7 +1463,7 @@ | |||
1259 | 1435 | # - manipulate /var/lib/mongodb/VERSION/CLUSTER symlink | 1463 | # - manipulate /var/lib/mongodb/VERSION/CLUSTER symlink |
1260 | 1436 | #------------------------------------------------------------------------------ | 1464 | #------------------------------------------------------------------------------ |
1261 | 1437 | def config_changed_volume_apply(): | 1465 | def config_changed_volume_apply(): |
1263 | 1438 | config_data = config_get() | 1466 | config_data = config() |
1264 | 1439 | data_directory_path = config_data["dbpath"] | 1467 | data_directory_path = config_data["dbpath"] |
1265 | 1440 | assert(data_directory_path) | 1468 | assert(data_directory_path) |
1266 | 1441 | volid = volume_get_volume_id() | 1469 | volid = volume_get_volume_id() |
1267 | @@ -1548,57 +1576,6 @@ | |||
1268 | 1548 | ############################################################################### | 1576 | ############################################################################### |
1269 | 1549 | # Main section | 1577 | # Main section |
1270 | 1550 | ############################################################################### | 1578 | ############################################################################### |
1325 | 1551 | if __name__ == '__main__': | 1579 | if __name__ == "__main__": |
1326 | 1552 | parser = argparse.ArgumentParser() | 1580 | # execute a hook based on the name the program is called by |
1327 | 1553 | parser.add_argument('-H', '--hook_name', dest='hook_name', | 1581 | hooks.execute(sys.argv) |
1274 | 1554 | help='hook to call') | ||
1275 | 1555 | args = parser.parse_args() | ||
1276 | 1556 | if args.hook_name is not None: | ||
1277 | 1557 | hook_name = args.hook_name | ||
1278 | 1558 | else: | ||
1279 | 1559 | hook_name = os.path.basename(sys.argv[0]) | ||
1280 | 1560 | |||
1281 | 1561 | if hook_name == "install": | ||
1282 | 1562 | retVal = install_hook() | ||
1283 | 1563 | elif hook_name == "config-changed": | ||
1284 | 1564 | retVal = config_changed() | ||
1285 | 1565 | elif hook_name == "start": | ||
1286 | 1566 | retVal = start_hook() | ||
1287 | 1567 | elif hook_name == "stop": | ||
1288 | 1568 | retVal = stop_hook() | ||
1289 | 1569 | elif hook_name == "database-relation-joined": | ||
1290 | 1570 | retVal = database_relation_joined() | ||
1291 | 1571 | elif hook_name == "replica-set-relation-joined": | ||
1292 | 1572 | retVal = replica_set_relation_joined() | ||
1293 | 1573 | elif hook_name == "replica-set-relation-changed": | ||
1294 | 1574 | retVal = replica_set_relation_changed() | ||
1295 | 1575 | elif hook_name == "configsvr-relation-joined": | ||
1296 | 1576 | retVal = configsvr_relation_joined() | ||
1297 | 1577 | elif hook_name == "configsvr-relation-changed": | ||
1298 | 1578 | retVal = configsvr_relation_changed() | ||
1299 | 1579 | elif hook_name == "mongos-cfg-relation-joined": | ||
1300 | 1580 | retVal = mongos_relation_joined() | ||
1301 | 1581 | elif hook_name == "mongos-cfg-relation-changed": | ||
1302 | 1582 | retVal = mongos_relation_changed() | ||
1303 | 1583 | elif hook_name == "mongos-cfg-relation-broken": | ||
1304 | 1584 | retVal = mongos_relation_broken() | ||
1305 | 1585 | elif hook_name == "mongos-relation-joined": | ||
1306 | 1586 | retVal = mongos_relation_joined() | ||
1307 | 1587 | elif hook_name == "mongos-relation-changed": | ||
1308 | 1588 | retVal = mongos_relation_changed() | ||
1309 | 1589 | elif hook_name == "mongos-relation-broken": | ||
1310 | 1590 | retVal = mongos_relation_broken() | ||
1311 | 1591 | elif hook_name == "data-relation-joined": | ||
1312 | 1592 | retVal = data_relation_joined() | ||
1313 | 1593 | elif hook_name == "data-relation-changed": | ||
1314 | 1594 | retVal = data_relation_changed() | ||
1315 | 1595 | elif hook_name == "data-relation-departed": | ||
1316 | 1596 | retVal = data_relation_departed() | ||
1317 | 1597 | else: | ||
1318 | 1598 | print "Unknown hook" | ||
1319 | 1599 | retVal = False | ||
1320 | 1600 | |||
1321 | 1601 | if retVal is True: | ||
1322 | 1602 | sys.exit(0) | ||
1323 | 1603 | else: | ||
1324 | 1604 | sys.exit(1) | ||
1328 | 1605 | \ No newline at end of file | 1582 | \ No newline at end of file |
1329 | 1606 | 1583 | ||
1330 | === modified file 'hooks/install' | |||
1331 | --- hooks/install 2013-11-25 19:48:00 +0000 | |||
1332 | +++ hooks/install 1970-01-01 00:00:00 +0000 | |||
1333 | @@ -1,5 +0,0 @@ | |||
1334 | 1 | #!/bin/bash | ||
1335 | 2 | |||
1336 | 3 | sudo apt-get install "python-yaml" | ||
1337 | 4 | |||
1338 | 5 | hooks/hooks.py -H install | ||
1339 | 6 | 0 | ||
1340 | === target is u'hooks.py' | |||
1341 | === modified file 'metadata.yaml' | |||
1342 | --- metadata.yaml 2014-06-18 11:13:54 +0000 | |||
1343 | +++ metadata.yaml 2014-07-29 16:34:12 +0000 | |||
1344 | @@ -1,6 +1,9 @@ | |||
1345 | 1 | name: mongodb | 1 | name: mongodb |
1348 | 2 | maintainer: Juan Negron <juan.negron@canonical.com> | 2 | summary: An open-source document database, and the leading NoSQL database |
1349 | 3 | summary: MongoDB (from humongous) is an open-source document database | 3 | maintainers: |
1350 | 4 | - Juan Negron <juan.negron@canonical.com> | ||
1351 | 5 | - Marco Ceppi <marco@ceppi.net> | ||
1352 | 6 | - Charles Butler <chuck@dasroot.net> | ||
1353 | 4 | description: | | 7 | description: | |
1354 | 5 | MongoDB is a high-performance, open source, schema-free document- | 8 | MongoDB is a high-performance, open source, schema-free document- |
1355 | 6 | oriented data store that's easy to deploy, manage and use. It's | 9 | oriented data store that's easy to deploy, manage and use. It's |
1356 | 7 | 10 | ||
1357 | === added file 'tests/200_relate_ceilometer.test' | |||
1358 | --- tests/200_relate_ceilometer.test 1970-01-01 00:00:00 +0000 | |||
1359 | +++ tests/200_relate_ceilometer.test 2014-07-29 16:34:12 +0000 | |||
1360 | @@ -0,0 +1,43 @@ | |||
1361 | 1 | #!/usr/bin/env python3 | ||
1362 | 2 | |||
1363 | 3 | import amulet | ||
1364 | 4 | import pdb | ||
1365 | 5 | |||
1366 | 6 | class TestDeploy(object): | ||
1367 | 7 | |||
1368 | 8 | def __init__(self, time=2500): | ||
1369 | 9 | # Attempt to load the deployment topology from a bundle. | ||
1370 | 10 | self.deploy = amulet.Deployment(series="trusty") | ||
1371 | 11 | |||
1372 | 12 | # If something errored out, attempt to continue by | ||
1373 | 13 | # manually specifying a standalone deployment | ||
1374 | 14 | self.deploy.add('mongodb') | ||
1375 | 15 | self.deploy.add('ceilometer', 'cs:trusty/ceilometer') | ||
1376 | 16 | # send blank configs to finalize the objects in the deployment map | ||
1377 | 17 | self.deploy.configure('mongodb', {}) | ||
1378 | 18 | self.deploy.configure('ceilometer', {}) | ||
1379 | 19 | |||
1380 | 20 | self.deploy.relate('mongodb:database', 'ceilometer:shared-db') | ||
1381 | 21 | |||
1382 | 22 | try: | ||
1383 | 23 | self.deploy.setup(time) | ||
1384 | 24 | self.deploy.sentry.wait(time) | ||
1385 | 25 | except: | ||
1386 | 26 | amulet.raise_status(amulet.FAIL, msg="Environment standup timeout") | ||
1387 | 27 | # sentry = self.deploy.sentry | ||
1388 | 28 | |||
1389 | 29 | def run(self): | ||
1390 | 30 | for test in dir(self): | ||
1391 | 31 | if test.startswith('test_'): | ||
1392 | 32 | getattr(self, test)() | ||
1393 | 33 | |||
1394 | 34 | def test_mongo_relation(self): | ||
1395 | 35 | unit = self.deploy.sentry.unit['ceilometer/0'] | ||
1396 | 36 | mongo = self.deploy.sentry.unit['mongodb/0'].info['public-address'] | ||
1397 | 37 | cont = unit.file_contents('/etc/ceilometer/ceilometer.conf') | ||
1398 | 38 | if mongo not in cont: | ||
1399 | 39 | amulet.raise_status(amulet.FAIL, "Unable to verify ceilometer cfg") | ||
1400 | 40 | |||
1401 | 41 | if __name__ == '__main__': | ||
1402 | 42 | runner = TestDeploy() | ||
1403 | 43 | runner.run() |
Unless my read of the inline diff below is wrong you have some merge conflicts that need addressing. Happy to peek at it again after. Thanks!