Merge lp:~thedac/charm-helpers/dnsha into lp:charm-helpers

Proposed by David Ames on 2016-06-09
Status: Merged
Merged at revision: 584
Proposed branch: lp:~thedac/charm-helpers/dnsha
Merge into: lp:charm-helpers
Diff against target: 546 lines (+393/-19)
6 files modified
charmhelpers/contrib/hahelpers/cluster.py (+59/-10)
charmhelpers/contrib/openstack/ha/utils.py (+111/-0)
charmhelpers/contrib/openstack/ip.py (+7/-4)
tests/contrib/hahelpers/test_cluster_utils.py (+110/-5)
tests/contrib/openstack/ha/test_ha_utils.py (+98/-0)
tests/contrib/openstack/test_ip.py (+8/-0)
To merge this branch: bzr merge lp:~thedac/charm-helpers/dnsha
Reviewer         Review Type    Date Requested    Status
Alex Kavanagh                                     Approve on 2016-06-14
charmers                        2016-06-09        Pending
Review via email: mp+297009@code.launchpad.net

Commit Message

DNS HA Helpers

Start the process of moving HA helpers into the openstack contrib area
Add the helper for API charms to use DNS HA
Add validation for new DNS config options in HA
Allow resolve_address to override with DNS or not

Description of the Change

DNS HA Helpers

Start the process of moving HA helpers into the openstack contrib area
Add the helper for API charms to use DNS HA
Add validation for new DNS config options in HA
Allow resolve_address to override with DNS or not
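
For illustration, a minimal sketch of how an API charm's ha-relation-joined hook might consume these helpers: the hook shape, the res_ks_* resource names and the VIP branch are assumptions drawn from typical OpenStack charms; only get_hacluster_config and update_dns_ha_resource_params come from this branch.

from charmhelpers.core.hookenv import config, relation_set
from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config
from charmhelpers.contrib.openstack.ha.utils import (
    update_dns_ha_resource_params,
)


def ha_joined(relation_id=None):
    # Raises HAIncorrectConfig/HAIncompleteConfig immediately if the
    # vip/dns-ha settings conflict or are missing.
    cluster_config = get_hacluster_config()
    resources = {'res_ks_haproxy': 'lsb:haproxy'}
    resource_params = {'res_ks_haproxy': 'op monitor interval="5s"'}

    if config('dns-ha'):
        # Adds one ocf:maas:dns resource per os-*-hostname that is set
        # and informs the relation of the resulting hostname group.
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        # Pre-existing VIP-based HA path, unchanged by this branch.
        resources['res_ks_vip'] = 'ocf:heartbeat:IPaddr2'
        resource_params['res_ks_vip'] = (
            'params ip="{}"'.format(cluster_config['vip']))

    relation_set(relation_id=relation_id,
                 resources=resources,
                 resource_params=resource_params)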

lp:~thedac/charm-helpers/dnsha updated on 2016-06-14
582. By Ryan Beisner on 2016-06-13

[jamespage,r=1chb1n] Update amulet helpers to deploy charms from the charm store

As part of the migration to git/gerrit and with the introduction of
layered charms, the charm store needs to be the definitive source of
truth for all charms during testing.

Switch the charm resolver code to pick the correct charms from the
charm store for amulet tests.

This changes behaviour in that base charms are deployed aligned to the
test series where possible; otherwise the most recent Ubuntu series is
used instead, resulting in mysql/trusty + keystone/xenial for example.

583. By James Page on 2016-06-14

Enable default git repo generation if a default openstack-origin-git value is specified.

Alex Kavanagh (ajkavanagh) wrote:

Just some discussion on status_set() - see inline comments.

David Ames (thedac) wrote:

Response inline

David Ames (thedac) wrote:

As requested, exceptions are now raised immediately for incomplete or incorrect HA configurations, to avoid confusion.
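
To illustrate the behaviour described above, a sketch of what a caller now sees (hook execution context assumed; the exception classes and the blocked status come from the diff below):

from charmhelpers.contrib.hahelpers.cluster import (
    HAIncompleteConfig,
    HAIncorrectConfig,
    get_hacluster_config,
)

try:
    ha_config = get_hacluster_config()
except HAIncorrectConfig:
    # vip and dns-ha were both set, or neither was; the unit has
    # already been marked 'blocked' via status_set().
    raise
except HAIncompleteConfig:
    # dns-ha was set but no os-*-hostname option was configured.
    raise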

Alex Kavanagh (ajkavanagh) wrote:

Looks good.

review: Approve
lp:~thedac/charm-helpers/dnsha updated on 2016-06-14
584. By David Ames on 2016-06-14

[thedac, r=tinwood] DNS HA Helpers

Start the process of moving HA helpers into the openstack contrib area
Add the helper for API charms to use DNS HA
Add validation for new DNS config options in HA
Allow resolve_address to override with DNS or not

Preview Diff

=== modified file 'charmhelpers/contrib/hahelpers/cluster.py'
--- charmhelpers/contrib/hahelpers/cluster.py	2015-06-05 19:03:02 +0000
+++ charmhelpers/contrib/hahelpers/cluster.py	2016-06-14 16:27:51 +0000
@@ -41,10 +41,11 @@
     relation_get,
     config as config_get,
     INFO,
-    ERROR,
+    DEBUG,
     WARNING,
     unit_get,
-    is_leader as juju_is_leader
+    is_leader as juju_is_leader,
+    status_set,
 )
 from charmhelpers.core.decorators import (
     retry_on_exception,
@@ -60,6 +61,10 @@
     pass
 
 
+class HAIncorrectConfig(Exception):
+    pass
+
+
 class CRMResourceNotFound(Exception):
     pass
 
@@ -274,27 +279,71 @@
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
 
-        ha-bindiface, ha-mcastport, vip
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname
 
     param: exclude_keys: list of setting key(s) to be excluded.
     returns: dict: A dict containing settings keyed by setting name.
-    raises: HAIncompleteConfig if settings are missing.
+    raises: HAIncorrectConfig if settings are missing or incorrect.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname']
     conf = {}
     for setting in settings:
         if exclude_keys and setting in exclude_keys:
             continue
 
         conf[setting] = config_get(setting)
-    missing = []
-    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
-    if missing:
-        log('Insufficient config data to configure hacluster.', level=ERROR)
-        raise HAIncompleteConfig
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
     return conf
 
 
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
 def canonical_url(configs, vip_setting='vip'):
     '''
     Returns the correct HTTP URL to this host given the state of HTTPS
=== added directory 'charmhelpers/contrib/openstack/ha'
=== added file 'charmhelpers/contrib/openstack/ha/__init__.py'
=== added file 'charmhelpers/contrib/openstack/ha/utils.py'
--- charmhelpers/contrib/openstack/ha/utils.py	1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/ha/utils.py	2016-06-14 16:27:51 +0000
@@ -0,0 +1,111 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2016 Canonical Ltd.
+#
+# Authors:
+#   Openstack Charmers <
+#
+
+"""
+Helpers for high availability.
+"""
+
+import re
+
+from charmhelpers.core.hookenv import (
+    log,
+    relation_set,
+    charm_name,
+    config,
+    status_set,
+    DEBUG,
+)
+
+from charmhelpers.contrib.openstack.ip import (
+    resolve_address,
+)
+
+
+class DNSHAException(Exception):
+    """Raised when an error occurs setting up DNS HA
+    """
+
+    pass
+
+
+def update_dns_ha_resource_params(resources, resource_params,
+                                  relation_id=None,
+                                  crm_ocf='ocf:maas:dns'):
+    """ Check for os-*-hostname settings and update resource dictionaries for
+    the HA relation.
+
+    @param resources: Pointer to dictionary of resources.
+                      Usually instantiated in ha_joined().
+    @param resource_params: Pointer to dictionary of resource parameters.
+                            Usually instantiated in ha_joined()
+    @param relation_id: Relation ID of the ha relation
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
+
+    settings = ['os-admin-hostname', 'os-internal-hostname',
+                'os-public-hostname']
+
+    # Check which DNS settings are set and update dictionaries
+    hostname_group = []
+    for setting in settings:
+        hostname = config(setting)
+        if hostname is None:
+            log('DNS HA: Hostname setting {} is None. Ignoring.'
+                ''.format(setting),
+                DEBUG)
+            continue
+        m = re.search('os-(.+?)-hostname', setting)
+        if m:
+            networkspace = m.group(1)
+        else:
+            msg = ('Unexpected DNS hostname setting: {}. '
                   'Cannot determine network space name'
+                   ''.format(setting))
+            status_set('blocked', msg)
+            raise DNSHAException(msg)
+
+        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
+        if hostname_key in hostname_group:
+            log('DNS HA: Resource {}: {} already exists in '
+                'hostname group - skipping'.format(hostname_key, hostname),
+                DEBUG)
+            continue
+
+        hostname_group.append(hostname_key)
+        resources[hostname_key] = crm_ocf
+        resource_params[hostname_key] = (
+            'params fqdn="{}" ip_address="{}" '
+            ''.format(hostname, resolve_address(endpoint_type=networkspace,
+                                                override=False)))
+
+    if len(hostname_group) >= 1:
+        log('DNS HA: Hostname group is set with {} as members. '
+            'Informing the ha relation'.format(' '.join(hostname_group)),
+            DEBUG)
+        relation_set(relation_id=relation_id, groups={
+            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
+    else:
+        msg = 'DNS HA: Hostname group has no members.'
+        status_set('blocked', msg)
+        raise DNSHAException(msg)

=== modified file 'charmhelpers/contrib/openstack/ip.py'
--- charmhelpers/contrib/openstack/ip.py	2016-03-21 16:34:40 +0000
+++ charmhelpers/contrib/openstack/ip.py	2016-06-14 16:27:51 +0000
@@ -109,7 +109,7 @@
         return addr_override.format(service_name=service_name())
 
 
-def resolve_address(endpoint_type=PUBLIC):
+def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.
 
     If unit is clustered with vip(s) and has net splits defined, return vip on
@@ -119,10 +119,13 @@
     split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
+    :param override: Accept hostname overrides or not
     """
-    resolved_address = _get_address_override(endpoint_type)
-    if resolved_address:
-        return resolved_address
+    resolved_address = None
+    if override:
+        resolved_address = _get_address_override(endpoint_type)
+        if resolved_address:
+            return resolved_address
 
     vips = config('vip')
     if vips:

=== modified file 'tests/contrib/hahelpers/test_cluster_utils.py'
--- tests/contrib/hahelpers/test_cluster_utils.py	2015-06-05 19:03:02 +0000
+++ tests/contrib/hahelpers/test_cluster_utils.py	2016-06-14 16:27:51 +0000
@@ -318,47 +318,62 @@
         port = cluster_utils.determine_apache_port(9696, singlenode_mode=True)
         self.assertEquals(9686, port)
 
-    def test_get_hacluster_config_complete(self):
+    @patch.object(cluster_utils, 'valid_hacluster_config')
+    def test_get_hacluster_config_complete(self, valid_hacluster_config):
         '''It fetches all hacluster charm config'''
         conf = {
             'ha-bindiface': 'eth1',
             'ha-mcastport': '3333',
             'vip': '10.0.0.1',
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
         }
 
+        valid_hacluster_config.return_value = True
+
         def _fake_config_get(setting):
             return conf[setting]
 
         self.config_get.side_effect = _fake_config_get
         self.assertEquals(conf, cluster_utils.get_hacluster_config())
 
-    def test_get_hacluster_config_incomplete(self):
+    @patch.object(cluster_utils, 'valid_hacluster_config')
+    def test_get_hacluster_config_incomplete(self, valid_hacluster_config):
         '''It raises exception if some hacluster charm config missing'''
         conf = {
             'ha-bindiface': 'eth1',
             'ha-mcastport': '3333',
             'vip': None,
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
         }
 
+        valid_hacluster_config.return_value = False
+
         def _fake_config_get(setting):
             return conf[setting]
 
         self.config_get.side_effect = _fake_config_get
-        self.assertRaises(cluster_utils.HAIncompleteConfig,
+        self.assertRaises(cluster_utils.HAIncorrectConfig,
                           cluster_utils.get_hacluster_config)
 
-    def test_get_hacluster_config_with_excludes(self):
+    @patch.object(cluster_utils, 'valid_hacluster_config')
+    def test_get_hacluster_config_with_excludes(self, valid_hacluster_config):
         '''It fetches all hacluster charm config'''
         conf = {
             'ha-bindiface': 'eth1',
             'ha-mcastport': '3333',
         }
+        valid_hacluster_config.return_value = True
 
         def _fake_config_get(setting):
             return conf[setting]
 
         self.config_get.side_effect = _fake_config_get
-        exclude_keys = ['vip']
+        exclude_keys = ['vip', 'os-admin-hostname', 'os-internal-hostname',
+                        'os-public-hostname']
         result = cluster_utils.get_hacluster_config(exclude_keys)
         self.assertEquals(conf, result)
 
@@ -406,3 +421,93 @@
         configs.complete_contexts.return_value = []
         url = cluster_utils.canonical_url(configs)
         self.assertEquals('http://10.0.0.1', url)
+
+    @patch.object(cluster_utils, 'status_set')
+    def test_valid_hacluster_config_incomplete(self, status_set):
+        '''Raises HAIncorrectConfig with incomplete HA config'''
+        conf = {
+            'vip': None,
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
+            'dns-ha': False,
+        }
+
+        def _fake_config_get(setting):
+            return conf[setting]
+
+        self.config_get.side_effect = _fake_config_get
+        self.assertRaises(cluster_utils.HAIncorrectConfig,
+                          cluster_utils.valid_hacluster_config)
+
+    @patch.object(cluster_utils, 'status_set')
+    def test_valid_hacluster_config_both(self, status_set):
+        '''Raises HAIncorrectConfig when both VIP and DNS HA are set'''
+        conf = {
+            'vip': '10.0.0.1',
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
+            'dns-ha': True,
+        }
+
+        def _fake_config_get(setting):
+            return conf[setting]
+
+        self.config_get.side_effect = _fake_config_get
+        self.assertRaises(cluster_utils.HAIncorrectConfig,
+                          cluster_utils.valid_hacluster_config)
+
+    @patch.object(cluster_utils, 'status_set')
+    def test_valid_hacluster_config_vip_ha(self, status_set):
+        '''Returns True with complete VIP HA config'''
+        conf = {
+            'vip': '10.0.0.1',
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
+            'dns-ha': False,
+        }
+
+        def _fake_config_get(setting):
+            return conf[setting]
+
+        self.config_get.side_effect = _fake_config_get
+        self.assertTrue(cluster_utils.valid_hacluster_config())
+        self.assertFalse(status_set.called)
+
+    @patch.object(cluster_utils, 'status_set')
+    def test_valid_hacluster_config_dns_incomplete(self, status_set):
+        '''Raises HAIncompleteConfig with incomplete DNS HA config'''
+        conf = {
+            'vip': None,
+            'os-admin-hostname': None,
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
+            'dns-ha': True,
+        }
+
+        def _fake_config_get(setting):
+            return conf[setting]
+
+        self.config_get.side_effect = _fake_config_get
+        self.assertRaises(cluster_utils.HAIncompleteConfig,
+                          cluster_utils.valid_hacluster_config)
+
+    @patch.object(cluster_utils, 'status_set')
+    def test_valid_hacluster_config_dns_ha(self, status_set):
+        '''Returns True with complete DNS HA config'''
+        conf = {
+            'vip': None,
+            'os-admin-hostname': 'somehostname',
+            'os-public-hostname': None,
+            'os-internal-hostname': None,
+            'dns-ha': True,
+        }
+
+        def _fake_config_get(setting):
+            return conf[setting]
+
+        self.config_get.side_effect = _fake_config_get
+        self.assertTrue(cluster_utils.valid_hacluster_config())
+        self.assertFalse(status_set.called)

=== added directory 'tests/contrib/openstack/ha'
=== added file 'tests/contrib/openstack/ha/__init__.py'
=== added file 'tests/contrib/openstack/ha/test_ha_utils.py'
--- tests/contrib/openstack/ha/test_ha_utils.py	1970-01-01 00:00:00 +0000
+++ tests/contrib/openstack/ha/test_ha_utils.py	2016-06-14 16:27:51 +0000
@@ -0,0 +1,98 @@
+from mock import patch
+import unittest
+
+from charmhelpers.contrib.openstack.ha import utils as ha
+
+
+class HATests(unittest.TestCase):
+    def setUp(self):
+        super(HATests, self).setUp()
+        [self._patch(m) for m in [
+            'charm_name',
+            'config',
+            'relation_set',
+            'resolve_address',
+            'status_set',
+        ]]
+        self.resources = {'res_test_haproxy': 'lsb:haproxy'}
+        self.resource_params = {'res_test_haproxy': 'op monitor interval="5s"'}
+        self.conf = {}
+        self.config.side_effect = lambda key: self.conf.get(key)
+
+    def _patch(self, method):
+        _m = patch.object(ha, method)
+        mock = _m.start()
+        self.addCleanup(_m.stop)
+        setattr(self, method, mock)
+
+    def test_update_dns_ha_resource_params_none(self):
+        self.conf = {
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': None,
+        }
+
+        with self.assertRaises(ha.DNSHAException):
+            ha.update_dns_ha_resource_params(
+                relation_id='ha:1',
+                resources=self.resources,
+                resource_params=self.resource_params)
+
+    def test_update_dns_ha_resource_params_one(self):
+        EXPECTED_RESOURCES = {'res_test_public_hostname': 'ocf:maas:dns',
+                              'res_test_haproxy': 'lsb:haproxy'}
+        EXPECTED_RESOURCE_PARAMS = {
+            'res_test_public_hostname': ('params fqdn="test.maas" '
+                                         'ip_address="10.0.0.1" '),
+            'res_test_haproxy': 'op monitor interval="5s"'}
+
+        self.conf = {
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'test.maas',
+        }
+
+        self.charm_name.return_value = 'test'
+        self.resolve_address.return_value = '10.0.0.1'
+        ha.update_dns_ha_resource_params(relation_id='ha:1',
+                                         resources=self.resources,
+                                         resource_params=self.resource_params)
+        self.assertEqual(self.resources, EXPECTED_RESOURCES)
+        self.assertEqual(self.resource_params, EXPECTED_RESOURCE_PARAMS)
+        self.relation_set.assert_called_with(
+            groups={'grp_test_hostnames': 'res_test_public_hostname'},
+            relation_id='ha:1')
+
+    def test_update_dns_ha_resource_params_all(self):
+        EXPECTED_RESOURCES = {'res_test_admin_hostname': 'ocf:maas:dns',
+                              'res_test_internal_hostname': 'ocf:maas:dns',
+                              'res_test_public_hostname': 'ocf:maas:dns',
+                              'res_test_haproxy': 'lsb:haproxy'}
+        EXPECTED_RESOURCE_PARAMS = {
+            'res_test_admin_hostname': ('params fqdn="test.admin.maas" '
+                                        'ip_address="10.0.0.1" '),
+            'res_test_internal_hostname': ('params fqdn="test.internal.maas" '
+                                           'ip_address="10.0.0.1" '),
+            'res_test_public_hostname': ('params fqdn="test.public.maas" '
+                                         'ip_address="10.0.0.1" '),
+            'res_test_haproxy': 'op monitor interval="5s"'}
+
+        self.conf = {
+            'os-admin-hostname': 'test.admin.maas',
+            'os-internal-hostname': 'test.internal.maas',
+            'os-public-hostname': 'test.public.maas',
+        }
+
+        self.charm_name.return_value = 'test'
+        self.resolve_address.return_value = '10.0.0.1'
+        ha.update_dns_ha_resource_params(relation_id='ha:1',
+                                         resources=self.resources,
+                                         resource_params=self.resource_params)
+        self.assertEqual(self.resources, EXPECTED_RESOURCES)
+        self.assertEqual(self.resource_params, EXPECTED_RESOURCE_PARAMS)
+        self.relation_set.assert_called_with(
+            groups={'grp_test_hostnames':
+                    ('res_test_admin_hostname '
+                     'res_test_internal_hostname '
+                     'res_test_public_hostname')},
+            relation_id='ha:1')

=== modified file 'tests/contrib/openstack/test_ip.py'
--- tests/contrib/openstack/test_ip.py	2016-03-30 21:04:15 +0000
+++ tests/contrib/openstack/test_ip.py	2016-06-14 16:27:51 +0000
@@ -101,6 +101,14 @@
         addr = ip.resolve_address()
         self.assertEqual('public.example.com', addr)
 
+    @patch.object(ip, '_get_address_override')
+    def test_resolve_address_no_override(self, _get_address_override):
+        self.test_config.set('os-public-hostname', 'public.example.com')
+        self.unit_get.return_value = '10.0.0.1'
+        addr = ip.resolve_address(override=False)
+        self.assertFalse(_get_address_override.called)
+        self.assertEqual('10.0.0.1', addr)
+
     def test_resolve_address_override_template(self):
         self.test_config.set('os-public-hostname',
                              '{service_name}.example.com')
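
A note on the resolve_address() change above: DNS HA needs the unit's actual address to hand to the ocf:maas:dns resource, so it calls resolve_address() with the new override flag disabled. A minimal sketch of the two modes (assumes os-public-hostname is set in charm config):

from charmhelpers.contrib.openstack.ip import resolve_address

# Default behaviour: a configured os-public-hostname override wins.
addr = resolve_address(endpoint_type='public')

# DNS HA path: skip the hostname override and return the VIP or the
# local unit address instead.
ip_addr = resolve_address(endpoint_type='public', override=False)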
