Merge lp:~jjo/openstack-mojo-specs/mojo-openstack-specs-add-mojo-bootstack-specs into lp:openstack-mojo-specs

Proposed by JuanJo Ciarlante
Status: Work in progress
Proposed branch: lp:~jjo/openstack-mojo-specs/mojo-openstack-specs-add-mojo-bootstack-specs
Merge into: lp:openstack-mojo-specs
Diff against target: 1843 lines (+1281/-72)
22 files modified
helper/setup/allow_vips_addresses.py (+75/-0)
helper/setup/bootstrap_ha.py (+74/-0)
helper/setup/images.yaml (+2/-1)
helper/setup/network_setup.py (+28/-15)
helper/tests/simple_os_checks.py (+3/-0)
helper/tests/test_obj_store.py (+12/-0)
helper/utils/mojo_os_utils.py (+25/-19)
helper/utils/mojo_utils.py (+93/-19)
specs/bootstack/customer/configs/bootstack-charms.bzr (+35/-0)
specs/bootstack/customer/configs/bootstack-example.yaml (+735/-0)
specs/bootstack/customer/configs/bootstack-repo (+24/-0)
specs/bootstack/customer/configs/glance-streams.yaml (+7/-0)
specs/bootstack/customer/configs/keystone_users.yaml (+5/-0)
specs/bootstack/customer/configs/network.yaml (+14/-0)
specs/bootstack/ha_phased/icehouse/SPEC_INFO.txt (+2/-0)
specs/bootstack/ha_phased/icehouse/bootstack-charms-1504.bzr (+35/-0)
specs/bootstack/ha_phased/icehouse/bootstack-charms-trunk.bzr (+35/-0)
specs/bootstack/ha_phased/icehouse/fix-lxcbr0.sh (+2/-0)
specs/bootstack/ha_phased/icehouse/manifest (+53/-0)
specs/dev/nova_cc_legacy_neutron/simple_os_checks.py (+3/-0)
specs/dev/nova_cc_legacy_neutron/utils/mojo_os_utils.py (+18/-17)
specs/dev/nova_cc_legacy_neutron/utils/mojo_utils.py (+1/-1)
To merge this branch: bzr merge lp:~jjo/openstack-mojo-specs/mojo-openstack-specs-add-mojo-bootstack-specs
Reviewer Review Type Date Requested Status
OpenStack Charm Testing Maintainers Pending
Review via email: mp+257578@code.launchpad.net

Unmerged revisions

219. By JuanJo Ciarlante

[jjo, r=] add bootstack to mojo-openstack-specs
* generalize mojo_utils.py and mojo_os_utils.py functions to support
  arbitrary net_name, subnet_name, etc.
* network_setup.py: support skipping undercloud network setup if the
  undercloud is not nova (e.g. MaaS)
* add helper/setup/allow_vips_addresses.py, similar to setup_vips.py but
  with a different approach to carving unused IPs, plus adding neutron
  allowed_address_pairs entries for the VIP'd service units (see the
  sketch after this list)
* add helper/setup/bootstrap_ha.py (example invocation after this list)
* add bootstack-charms-1504.bzr
* simple_os_checks.py: add --net_name arg, moving the hard-coded 'private'
  default out of mojo_os_utils.py
* add specs/bootstack/ha_phased and specs/bootstack/customer/configs
  - based on 'bootstack-example' configs branch
  - has secrets fully included, to avoid the need for /srv/mojo/LOCAL
    setup
  - note the extensive usage of YAML aliases (&varnames; illustrated
    after this list)
  - note the 'mysql' servicename (albeit backed by the percona-cluster charm)
  - stages:
    - stage1: services that reside on metal
    - stage2: LXC'able and smooshed services, hacluster services+relations
    - stage2-relations: inter-service relations
    - stage2-complete: num_units from 2 to 3, for VIP'd services
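
For reference, the crux of the allowed_address_pairs approach in
allow_vips_addresses.py (see the diff below) is a single neutron
update_port call per unit, whitelisting the service VIP on the port of
the unit's nova instance. A minimal sketch, assuming a python-neutronclient
Client such as the one returned by mojo_os_utils.get_neutron_client, with
hypothetical vip/port_id values:

    # Allow traffic for the service VIP on a unit's neutron port.
    # `neutronc` is a neutronclient.v2_0.client.Client; `port_id` is the
    # port attached to the unit's instance (looked up via its device_id).
    vip = '172.20.168.103'  # e.g. *KEYSTONE_VIP from bootstack-example.yaml
    pairs = [{'ip_address': vip}]
    neutronc.update_port(port_id, {'port': {'allowed_address_pairs': pairs}})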
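
bootstrap_ha.py reads its options via argparse (or from the mojo manifest
through parse_mojo_arg); an illustrative invocation, with made-up
constraint values, is:

    python helper/setup/bootstrap_ha.py --m0_constraints tags=bootstrap \
        --m12_constraints tags=ha --rest_constraints tags=compute \
        --ha_servicename openstack-ha

This bootstraps machine 0, runs juju ensure-availability to add machines
1-2, resets/sets constraints for the rest of the environment, and deploys
the openstack-ha dummy (cs:trusty/ubuntu) service onto machines 0-2.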
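
On the YAML aliases: bootstack-example.yaml re-declares a top-level
'config:' key purely to define anchors (&NAME value), which later stanzas
dereference with *NAME. PyYAML tolerates the duplicate keys (the last
'config:' wins) while every anchor stays resolvable. A minimal sketch of
the pattern:

    import yaml

    doc = """
    config: &REGION bootstack-example
    config: &KEYSTONE_VIP '172.20.168.103'
    keystone:
      options:
        region: *REGION
        vip: *KEYSTONE_VIP
    """
    # Anchors resolve at load time; the duplicate 'config:' keys collapse.
    data = yaml.safe_load(doc)
    print(data['keystone']['options'])
    # -> {'region': 'bootstack-example', 'vip': '172.20.168.103'}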

Preview Diff

1=== added file 'helper/setup/allow_vips_addresses.py'
2--- helper/setup/allow_vips_addresses.py 1970-01-01 00:00:00 +0000
3+++ helper/setup/allow_vips_addresses.py 2015-04-27 20:19:34 +0000
4@@ -0,0 +1,75 @@
5+#!/usr/bin/python
6+import utils.mojo_utils as mojo_utils
7+import utils.mojo_os_utils as mojo_os_utils
8+import netaddr
9+import logging
10+
11+
12+class VipPool():
13+ def __init__(self, neutronc):
14+ prov_net = mojo_utils.get_undercload_netid()
15+ if prov_net:
16+ net = neutronc.list_networks(name=prov_net)['networks'][0]
17+ else:
18+ net = mojo_os_utils.get_admin_net(neutronc)
19+ subnet_id = net['subnets'][0]
20+ subnet = neutronc.list_subnets(id=subnet_id)['subnets'][0]
21+ ports = neutronc.list_ports()
22+ used_ips = [p.get('fixed_ips') for p in ports['ports']]
23+ used_ips = [ip[0].get('ip_address') for ip in used_ips if ip]
24+ self.port_by_device_id = {p['device_id']: p for p in ports['ports']}
25+
26+ self.cidr = subnet['cidr']
27+ self.available_ips = []
28+ # shamelessly scan the whole cidr, saving unused IPs (not in any port)
29+ for ip in list(netaddr.IPNetwork(self.cidr))[1:-1]:
30+ ip = str(ip)
31+ if ip not in used_ips:
32+ self.available_ips.append(ip)
33+
34+ def get_port_by_device_id(self, uuid):
35+ return self.port_by_device_id.get(uuid)
36+
37+ def get_next(self):
38+ try:
39+ return self.available_ips.pop()
40+ except IndexError:
41+ raise Exception("vip pool exhausted")
42+
43+logging.basicConfig(level=logging.INFO)
44+undercloud_novarc = mojo_utils.get_undercload_auth()
45+neutronc = mojo_os_utils.get_neutron_client(undercloud_novarc)
46+vp = VipPool(neutronc)
47+juju_status = mojo_utils.get_juju_status()
48+new_vip = None
49+for svc in juju_status['services']:
50+ # we don't use rabbitmq's vip but rather let it cluster and
51+ # use all endpoints, skip
52+ if svc.startswith("rabbitmq"):
53+ continue
54+ if 'vip' not in mojo_utils.juju_get_config_keys(svc):
55+ continue
56+ vip = mojo_utils.juju_get(svc, "vip")
57+ if vip and vip in vp.available_ips:
58+ logging.info("service=%s vip=%s Ok", svc, vip)
59+ else:
60+ new_vip = vp.get_next()
61+ if vip:
62+ logging.warning("service=%s vip=%s is in use, changing to new_vip=%s",
63+ svc, vip, new_vip)
64+ else:
65+ logging.info("service=%s has no vip, using new_vip=%s", svc, new_vip)
66+ vip = new_vip; mojo_utils.juju_set(svc, 'vip=%s' % vip, wait=False)
67+ # add allowed_address_pairs containing the vip to each unit
68+ for u_name, u_data in juju_status["services"][svc]['units'].items():
69+ machine = u_data['machine']
70+ nova_id = juju_status['machines'][machine]['instance-id']
71+ port = vp.get_port_by_device_id(nova_id)
72+ logging.info("unit=%s nova_id=%s port_id=%s: allowing vip=%s",
73+ u_name, nova_id, port['id'], vip)
74+ pairs = [{'ip_address': vip}]
75+ neutronc.update_port(port['id'], {'port': {'allowed_address_pairs': pairs}})
76+
77+# If there was any new_vip, wait
78+if new_vip:
79+ mojo_utils.juju_wait_finished()
80
81=== added file 'helper/setup/bootstrap_ha.py'
82--- helper/setup/bootstrap_ha.py 1970-01-01 00:00:00 +0000
83+++ helper/setup/bootstrap_ha.py 2015-04-27 20:19:34 +0000
84@@ -0,0 +1,74 @@
85+#!/usr/bin/python
86+import sys
87+import utils.mojo_utils as mojo_utils
88+import logging
89+import argparse
90+import time
91+
92+
93+def bootstrap_ha(m0_constraints=None,
94+ m12_constraints=None,
95+ rest_constraints=None,
96+ ha_servicename=None):
97+ if m0_constraints:
98+ constraints = ("--constraints", m0_constraints)
99+ else:
100+ constraints = ()
101+ mojo_utils.bootstrap(*constraints)
102+
103+ # hack to reset constraints for the rest of the env
104+ # as there's no unset-constraints:
105+ mojo_utils.set_constraints("tags=")
106+
107+ if m12_constraints:
108+ constraints = ("--constraints", m12_constraints)
109+ else:
110+ constraints = ()
111+ mojo_utils.ensure_availability(*constraints)
112+
113+ if rest_constraints:
114+ mojo_utils.set_constraints(rest_constraints)
115+
116+ logging.info("Waiting for environment to settle")
117+ mojo_utils.juju_status_check_and_wait()
118+
119+ # add a dummy service to HA units, to ease referring to them
120+ # by this juju service name
121+ if ha_servicename:
122+ # deploy ha_servicename dummy service to machine: [0, 1, 2]
123+ units = mojo_utils.get_juju_units(None, ha_servicename)
124+ if "{}/0".format(ha_servicename) not in units:
125+ logging.info("deploying %s/0", ha_servicename)
126+ mojo_utils.deploy("cs:trusty/ubuntu", ha_servicename, "--to", "0")
127+ else:
128+ logging.info("%s/0 unit already present", ha_servicename)
129+ for num in (1, 2):
130+ if "{}/{}".format(ha_servicename, num) not in units:
131+ logging.info("adding %s/%d", ha_servicename, num)
132+ mojo_utils.add_unit(ha_servicename, 1, "--to", str(num))
133+ time.sleep(30)
134+ else:
135+ logging.info("%s/%d unit already present", ha_servicename, num)
136+ mojo_utils.juju_status_check_and_wait()
137+ return True
138+
139+
140+def main(argv):
141+ logging.basicConfig(level=logging.INFO)
142+ parser = argparse.ArgumentParser()
143+ parser.add_argument("--m0_constraints")
144+ parser.add_argument("--m12_constraints")
145+ parser.add_argument("--rest_constraints")
146+ parser.add_argument("--ha_servicename")
147+ options = parser.parse_args()
148+ m0_c = mojo_utils.parse_mojo_arg(options, 'm0_constraints')
149+ m12_c = mojo_utils.parse_mojo_arg(options, 'm12_constraints')
150+ rest_c = mojo_utils.parse_mojo_arg(options, 'rest_constraints')
151+ ha_svc = mojo_utils.parse_mojo_arg(options, 'ha_servicename')
152+ logging.info("Bootstrapping with: bootstrap_ha(%s, %s, %s, %s)",
153+ m0_c, m12_c, rest_c, ha_svc)
154+ return 0 if bootstrap_ha(m0_c, m12_c, rest_c, ha_svc) else 1
155+
156+
157+if __name__ == '__main__':
158+ sys.exit(main(sys.argv))
159
160=== modified file 'helper/setup/images.yaml'
161--- helper/setup/images.yaml 2014-12-16 15:41:15 +0000
162+++ helper/setup/images.yaml 2015-04-27 20:19:34 +0000
163@@ -7,7 +7,8 @@
164 disk_format: qcow2
165 glance_name: cirros
166 is_public: 'true'
167- url: http://10.245.160.50:80/swift/v1/images/cirros-0.3.3-x86_64-disk.img
168+# url: http://10.245.160.50:80/swift/v1/images/cirros-0.3.3-x86_64-disk.img
169+ url: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
170 precise:
171 auth_type: privkey
172 bootstring: cloud-init boot finished at
173
174=== modified file 'helper/setup/network_setup.py'
175--- helper/setup/network_setup.py 2015-02-10 07:55:00 +0000
176+++ helper/setup/network_setup.py 2015-04-27 20:19:34 +0000
177@@ -15,22 +15,29 @@
178 # Resolve the tenant name from the overcloud novarc into a tenant id
179 tenant_id = mojo_os_utils.get_tenant_id(keystonec,
180 overcloud_novarc['OS_TENANT_NAME'])
181+ private_subnet_name = net_info.get('private_subnet_name', 'private_subnet')
182+ private_net_name = net_info.get('private_net_name', 'private')
183+ router_name = net_info.get('router_name', 'provider-router')
184+ ext_net_name = net_info.get('external_net_name', 'ext_net')
185+ ext_subnet_name = net_info.get('external_subnet_name', 'ext_net_subnet')
186 # Create the external network
187 ext_network = mojo_os_utils.create_external_network(
188 neutronc,
189 tenant_id,
190- net_info['external_net_name'],
191+ ext_net_name,
192 net_info['network_type'])
193 mojo_os_utils.create_external_subnet(
194 neutronc,
195 tenant_id,
196 ext_network,
197+ ext_subnet_name,
198 net_info['default_gateway'],
199 net_info['external_net_cidr'],
200 net_info['start_floating_ip'],
201- net_info['end_floating_ip'],
202- net_info['external_subnet_name'])
203- provider_router = mojo_os_utils.create_provider_router(neutronc, tenant_id)
204+ net_info['end_floating_ip'])
205+ provider_router = mojo_os_utils.create_provider_router(neutronc,
206+ tenant_id,
207+ router_name)
208 mojo_os_utils.plug_extnet_into_router(
209 neutronc,
210 provider_router,
211@@ -38,20 +45,22 @@
212 tenant_network = mojo_os_utils.create_tenant_network(
213 neutronc,
214 tenant_id,
215+ private_net_name,
216 shared=False,
217 network_type=net_info['network_type'])
218 tenant_subnet = mojo_os_utils.create_tenant_subnet(
219 neutronc,
220 tenant_id,
221 tenant_network,
222- net_info['private_net_cidr'])
223+ net_info['private_net_cidr'],
224+ private_subnet_name)
225 mojo_os_utils.update_subnet_dns(
226 neutronc,
227 tenant_subnet,
228 net_info['external_dns'])
229 mojo_os_utils.plug_subnet_into_router(
230 neutronc,
231- net_info['router_name'],
232+ router_name,
233 tenant_network,
234 tenant_subnet)
235
236@@ -64,15 +73,19 @@
237 net_topology = mojo_utils.parse_mojo_arg(options, 'net_topology')
238 logging.info('Setting up %s network' % (net_topology))
239 undercloud_novarc = mojo_utils.get_undercload_auth()
240- novac = mojo_os_utils.get_nova_client(undercloud_novarc)
241- neutronc = mojo_os_utils.get_neutron_client(undercloud_novarc)
242- # Add an interface to the neutron-gateway units and tell juju to us it
243- # as the external port
244- net_info = mojo_utils.get_mojo_config('network.yaml')[net_topology]
245- mojo_os_utils.configure_gateway_ext_port(
246- novac,
247- neutronc,
248- dvr_mode=net_info.get('dvr_enabled', False))
249+ if undercloud_novarc:
250+ novac = mojo_os_utils.get_nova_client(undercloud_novarc)
251+ neutronc = mojo_os_utils.get_neutron_client(undercloud_novarc)
252+ # Add an interface to the neutron-gateway units and tell juju to use it
253+ # as the external port
254+ net_info = mojo_utils.get_mojo_config('network.yaml')[net_topology]
255+ mojo_os_utils.configure_gateway_ext_port(
256+ novac,
257+ neutronc,
258+ dvr_mode=net_info.get('dvr_enabled', False))
259+ else:
260+ logging.info("Skipping undercloud network setup "
261+ "(no undercloud_novarc)")
262 setup_sdn(net_topology)
263
264
265
266=== modified file 'helper/tests/simple_os_checks.py'
267--- helper/tests/simple_os_checks.py 2014-12-16 07:59:12 +0000
268+++ helper/tests/simple_os_checks.py 2015-04-27 20:19:34 +0000
269@@ -13,11 +13,13 @@
270 parser.add_argument("--active_wait", default=180)
271 parser.add_argument("--cloudinit_wait", default=180)
272 parser.add_argument("--ping_wait", default=180)
273+ parser.add_argument("--net_name", default='private')
274 options = parser.parse_args()
275 machines = mojo_utils.parse_mojo_arg(options, 'machines', multiargs=True)
276 active_wait = int(mojo_utils.parse_mojo_arg(options, 'active_wait'))
277 cloudinit_wait = int(mojo_utils.parse_mojo_arg(options, 'cloudinit_wait'))
278 ping_wait = int(mojo_utils.parse_mojo_arg(options, 'ping_wait'))
279+ net_name = mojo_utils.parse_mojo_arg(options, 'net_name')
280 logging.basicConfig(level=logging.INFO)
281 overcloud_novarc = mojo_utils.get_overcloud_auth()
282 novac = mojo_os_utils.get_nova_client(overcloud_novarc)
283@@ -30,6 +32,7 @@
284 flavor_name=flavor_name,
285 number=int(count),
286 privkey=priv_key,
287+ net_name=net_name,
288 active_wait=active_wait,
289 cloudinit_wait=cloudinit_wait,
290 ping_wait=ping_wait)
291
292=== modified file 'helper/tests/test_obj_store.py'
293--- helper/tests/test_obj_store.py 2015-01-21 09:46:18 +0000
294+++ helper/tests/test_obj_store.py 2015-04-27 20:19:34 +0000
295@@ -6,6 +6,7 @@
296 import utils.mojo_os_utils as mojo_os_utils
297 import utils.mojo_utils as mojo_utils
298 import sys
299+import logging
300
301
302 class ObjectPushPull(threading.Thread):
303@@ -36,6 +37,10 @@
304 return root_str*sizes[self.payload_size]
305
306 def run(self):
307+ logger = logging.getLogger(__name__)
308+ logger.info("Starting %s: runs=%d, container=%s, payload_size=%s",
309+ self.thread_name, self.runs, self.container,
310+ self.payload_size)
311 for i in range(0, self.runs):
312 test_string = self.get_test_string()
313 string_hash = self.get_hash(test_string)
314@@ -45,6 +50,10 @@
315 self.successes += 1
316 else:
317 self.failures += 1
318+ if (i + 1) % max(self.runs / 10, 1) == 0:
319+ logger.info("Thread %s: completed run %d/%d, succ=%d, fail=%d",
320+ self.thread_name, i + 1, self.runs,
321+ self.successes, self.failures)
322
323 def get_swiftclient(self):
324 overcloud_novarc = mojo_utils.get_overcloud_auth()
325@@ -67,6 +76,9 @@
326
327
328 def main(argv):
329+ logger = logging.getLogger(__name__)
330+ logger.addHandler(logging.StreamHandler())
331+ logger.setLevel(logging.INFO)
332 thread1 = ObjectPushPull(10, 'thread1', payload_size='l')
333 thread2 = ObjectPushPull(100, 'thread2', payload_size='s')
334 thread1.start()
335
336=== modified file 'helper/utils/mojo_os_utils.py'
337--- helper/utils/mojo_os_utils.py 2015-02-10 07:55:00 +0000
338+++ helper/utils/mojo_os_utils.py 2015-04-27 20:19:34 +0000
339@@ -197,19 +197,23 @@
340 for net in neutron_client.list_networks()['networks']:
341 if net['name'].endswith('_admin_net'):
342 return net
343+ logging.warning('No *_admin_net network found')
344
345
346 def configure_gateway_ext_port(novaclient, neutronclient, dvr_mode=None):
347 uuids = get_gateway_uuids()
348 if dvr_mode:
349 uuids.extend(get_ovs_uuids())
350- admin_net_id = get_admin_net(neutronclient)['id']
351+ admin_net = get_admin_net(neutronclient)
352+ if not admin_net:
353+ logging.error('No admin_net found, skipping ext_port setup')
354+ return
355 for uuid in uuids:
356 server = novaclient.servers.get(uuid)
357 mac_addrs = [a.mac_addr for a in server.interface_list()]
358 if len(mac_addrs) < 2:
359 logging.info('Adding additional port to server')
360- server.interface_attach(port_id=None, net_id=admin_net_id,
361+ server.interface_attach(port_id=None, net_id=admin_net['id'],
362 fixed_ip=None)
363 else:
364 logging.warning('Neutron Gateway already has additional port')
365@@ -229,8 +233,8 @@
366 mojo_utils.juju_wait_finished()
367
368
369-def create_tenant_network(neutron_client, tenant_id, net_name='private',
370- shared=False, network_type='gre'):
371+def create_tenant_network(neutron_client, tenant_id, net_name, shared=False,
372+ network_type='gre'):
373 networks = neutron_client.list_networks(name=net_name)
374 if len(networks['networks']) == 0:
375 logging.info('Creating network: %s',
376@@ -252,8 +256,9 @@
377 return network
378
379
380-def create_external_network(neutron_client, tenant_id, net_name='ext_net',
381+def create_external_network(neutron_client, tenant_id, net_name,
382 network_type='gre'):
383+ logging.info('Configuring external net_name={}'.format(net_name))
384 networks = neutron_client.list_networks(name=net_name)
385 if len(networks['networks']) == 0:
386 logging.info('Configuring external bridge')
387@@ -277,8 +282,8 @@
388 return network
389
390
391-def create_tenant_subnet(neutron_client, tenant_id, network, cidr, dhcp=True,
392- subnet_name='private_subnet'):
393+def create_tenant_subnet(neutron_client, tenant_id, network, cidr, subnet_name,
394+ dhcp=True):
395 # Create subnet
396 subnets = neutron_client.list_subnets(name=subnet_name)
397 if len(subnets['subnets']) == 0:
398@@ -300,10 +305,9 @@
399 return subnet
400
401
402-def create_external_subnet(neutron_client, tenant_id, network,
403+def create_external_subnet(neutron_client, tenant_id, network, subnet_name,
404 default_gateway=None, cidr=None,
405- start_floating_ip=None, end_floating_ip=None,
406- subnet_name='ext_net_subnet'):
407+ start_floating_ip=None, end_floating_ip=None):
408 subnets = neutron_client.list_subnets(name=subnet_name)
409 if len(subnets['subnets']) == 0:
410 subnet_msg = {
411@@ -345,20 +349,20 @@
412 neutron_client.update_subnet(subnet['id'], msg)
413
414
415-def create_provider_router(neutron_client, tenant_id):
416- routers = neutron_client.list_routers(name='provider-router')
417+def create_provider_router(neutron_client, tenant_id, router_name):
418+ routers = neutron_client.list_routers(name=router_name)
419 if len(routers['routers']) == 0:
420 logging.info('Creating provider router for external network access')
421 router_info = {
422 'router': {
423- 'name': 'provider-router',
424+ 'name': router_name,
425 'tenant_id': tenant_id
426 }
427 }
428 router = neutron_client.create_router(router_info)['router']
429- logging.info('New router created: %s', (router['id']))
430+ logging.info('New router "%s" created: %s', router_name, router['id'])
431 else:
432- logging.warning('Router provider-router already exists.')
433+ logging.warning('Router %s already exists.', router_name)
434 router = routers['routers'][0]
435 return router
436
437@@ -406,10 +410,10 @@
438 return new_key.private_key
439
440
441-def boot_instance(nova_client, image_name, flavor_name, key_name):
442+def boot_instance(nova_client, image_name, flavor_name, key_name, net_name):
443 image = nova_client.images.find(name=image_name)
444 flavor = nova_client.flavors.find(name=flavor_name)
445- net = nova_client.networks.find(label="private")
446+ net = nova_client.networks.find(label=net_name)
447 nics = [{'net-id': net.id}]
448 # Obviously time may not produce a unique name
449 vm_name = time.strftime("%Y%m%d%H%M%S")
450@@ -539,13 +543,15 @@
451
452
453 def boot_and_test(nova_client, image_name, flavor_name, number, privkey,
454- active_wait=180, cloudinit_wait=180, ping_wait=180):
455+ net_name, active_wait=180, cloudinit_wait=180,
456+ ping_wait=180):
457 image_config = mojo_utils.get_mojo_config('images.yaml')
458 for counter in range(number):
459 instance = boot_instance(nova_client,
460 image_name=image_name,
461 flavor_name=flavor_name,
462- key_name='mojo')
463+ key_name='mojo',
464+ net_name=net_name)
465 wait_for_boot(nova_client, instance.name,
466 image_config[image_name]['bootstring'], active_wait,
467 cloudinit_wait)
468
469=== modified file 'helper/utils/mojo_utils.py'
470--- helper/utils/mojo_utils.py 2015-02-10 07:55:00 +0000
471+++ helper/utils/mojo_utils.py 2015-04-27 20:19:34 +0000
472@@ -3,6 +3,7 @@
473 import subprocess
474 import yaml
475 import os
476+import re
477 import mojo
478 import logging
479 import time
480@@ -16,6 +17,60 @@
481 }
482
483
484+def bootstrap(*args):
485+ if get_env(True):
486+ return False
487+ logging.info("Bootstrapping juju environment")
488+ cmd = ['juju', 'bootstrap'] + list(args)
489+ subprocess.check_call(cmd)
490+ logging.info("Waiting for bootstrap to settle")
491+ juju_status_check_and_wait()
492+ return True
493+
494+
495+def ensure_availability(*extra_args):
496+ status = get_juju_status()
497+ if status["machines"].keys() != ["0"]:
498+ logging.warning("Unimplemented: environment should only have "
499+ "machine 0 to ensure_availability")
500+ return False
501+ cmd = ['juju', 'ensure-availability'] + list(extra_args)
502+ logging.info("Calling: {}".format(cmd))
503+ subprocess.check_call(cmd)
504+ logging.info("Waiting for ensure-availability to settle")
505+ juju_status_check_and_wait()
506+ return True
507+
508+
509+def set_constraints(constraints):
510+ if type(constraints) == str:
511+ constraints = [constraints]
512+ else:
513+ constraints = list(constraints)
514+ cmd = ['juju', 'set-constraints'] + constraints
515+ subprocess.check_call(cmd)
516+
517+
518+def deploy(charm, service, *extra_args):
519+ cmd = ['juju', 'deploy', charm, service] + list(extra_args)
520+ subprocess.check_call(cmd)
521+
522+
523+def get_env(softfail=False):
524+ cmd = ['juju', 'get-env']
525+ output = None
526+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
527+ stderr=subprocess.STDOUT)
528+ output, _ = p.communicate()
529+ if p.returncode != 0:
530+ if softfail and re.search('is not bootstrapped', output):
531+ return {}
532+ else:
533+ raise Exception("juju get-env failed (rc={}) with:\n{}"
534+ "".format(p.returncode, output))
535+ return output
536+
537+
538 def get_juju_status(service=None, unit=None):
539 cmd = ['juju', 'status']
540 if service:
541@@ -41,7 +96,7 @@
542 else:
543 services = [svc for svc in juju_status['services']]
544 for svc in services:
545- if 'units' in juju_status['services'][svc]:
546+ if 'units' in juju_status['services'].get(svc, {}):
547 for unit in juju_status['services'][svc]['units']:
548 units.append(unit)
549 return units
550@@ -181,14 +236,16 @@
551 units, lambda a, b: cmp(int(a.split('/')[-1]), int(b.split('/')[-1])))
552
553
554-def add_unit(service, unit_num=None):
555+def add_unit(service, unit_num=None, *extra_args):
556+ extra_args = list(extra_args)
557 unit_count = len(get_juju_units(service=service))
558 if unit_num:
559 additional_units = int(unit_num)
560 else:
561 additional_units = 1
562 logging.info('Adding %i unit(s) to %s' % (additional_units, service))
563- cmd = ['juju', 'add-unit', service, '-n', str(additional_units)]
564+ cmd = ['juju', 'add-unit', service, '-n',
565+ str(additional_units)] + extra_args
566 subprocess.check_call(cmd)
567 target_num = unit_count + additional_units
568 # Wait for the new unit to appear in juju status
569@@ -217,25 +274,40 @@
570 cmd = ['juju', 'get', service]
571 juju_get_output = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
572 service_config = yaml.load(juju_get_output)
573- if 'value' in service_config['settings'][option]:
574+ # softfail if option not present
575+ if 'value' in service_config['settings'].get(option, {}):
576 return service_config['settings'][option]['value']
577
578
579-def get_juju_environments_yaml():
580- juju_env_file = open(os.environ['HOME'] + "/.juju/environments.yaml", 'r')
581- return yaml.load(juju_env_file)
582+def get_juju_environment(juju_env):
583+ juju_env_file_env = os.path.join(os.environ['HOME'],
584+ ".juju/environments",
585+ "{}.jenv".format(juju_env))
586+ juju_env_file_all = os.path.join(os.environ['HOME'],
587+ ".juju/environments.yaml")
588+ if os.path.exists(juju_env_file_env):
589+ return yaml.load(open(juju_env_file_env))["bootstrap-config"]
590+ if os.path.exists(juju_env_file_all):
591+ return yaml.load(open(juju_env_file_all))[juju_env]
592+
593+
594+def get_juju_env():
595+ return (os.environ.get("JUJU_ENV") or
596+ subprocess.check_output(['juju', 'switch']).strip('\n'))
597
598
599 def get_provider_type():
600- juju_env = subprocess.check_output(['juju', 'switch']).strip('\n')
601- juju_env_contents = get_juju_environments_yaml()
602- return juju_env_contents['environments'][juju_env]['type']
603+ juju_env = get_juju_env()
604+ juju_env_contents = get_juju_environment(juju_env)
605+ return juju_env_contents['type']
606
607
608 def get_undercload_auth():
609- juju_env = subprocess.check_output(['juju', 'switch']).strip('\n')
610- juju_env_contents = get_juju_environments_yaml()
611- novarc_settings = juju_env_contents['environments'][juju_env]
612+ juju_env = get_juju_env()
613+ novarc_settings = get_juju_environment(juju_env)
614+ # e.g. over MaaS:
615+ if 'auth-url' not in novarc_settings:
616+ return None
617 auth_settings = {
618 'OS_AUTH_URL': novarc_settings['auth-url'],
619 'OS_TENANT_NAME': novarc_settings['tenant-name'],
620@@ -247,10 +319,9 @@
621
622
623 def get_undercload_netid():
624- juju_env = subprocess.check_output(['juju', 'switch']).strip('\n')
625- juju_env_contents = get_juju_environments_yaml()
626- if 'network' in juju_env_contents['environments'][juju_env]:
627- return juju_env_contents['environments'][juju_env]['network']
628+ juju_env = get_juju_env()
629+ juju_env_contents = get_juju_environment(juju_env)
630+ return juju_env_contents.get('network')
631
632
633 # Openstack Client helpers
634@@ -273,17 +344,20 @@
635 transport = 'http'
636 port = 5000
637 address = get_auth_url()
638+ os_password = juju_get('keystone', 'admin-password') or 'openstack'
639+ os_region_name = juju_get('keystone', 'region') or 'RegionOne'
640 auth_settings = {
641 'OS_AUTH_URL': '%s://%s:%i/v2.0' % (transport, address, port),
642 'OS_TENANT_NAME': 'admin',
643 'OS_USERNAME': 'admin',
644- 'OS_PASSWORD': 'openstack',
645- 'OS_REGION_NAME': 'RegionOne',
646+ 'OS_PASSWORD': os_password,
647+ 'OS_REGION_NAME': os_region_name,
648 }
649 return auth_settings
650
651
652 def get_mojo_file(filename):
653+ mfile = None
654 if 'MOJO_SPEC_DIR' in os.environ:
655 spec = mojo.Spec(os.environ['MOJO_SPEC_DIR'])
656 mfile = spec.get_config(filename, stage=os.environ['MOJO_STAGE'])
657
658=== added directory 'specs/bootstack'
659=== added directory 'specs/bootstack/customer'
660=== added directory 'specs/bootstack/customer/configs'
661=== added file 'specs/bootstack/customer/configs/bootstack-charms.bzr'
662--- specs/bootstack/customer/configs/bootstack-charms.bzr 1970-01-01 00:00:00 +0000
663+++ specs/bootstack/customer/configs/bootstack-charms.bzr 2015-04-27 20:19:34 +0000
664@@ -0,0 +1,35 @@
665+#openstack:
666+ubuntu lp:charms/trusty/ubuntu;revno=10
667+ubuntu-nagios lp:charms/trusty/ubuntu;revno=10
668+ntp lp:charms/trusty/ntp;revno=18
669+hacluster lp:charms/trusty/hacluster;revno=41
670+nova-compute lp:charms/trusty/nova-compute;revno=100
671+ceph lp:charms/trusty/ceph;revno=99
672+ceph-osd lp:charms/trusty/ceph-osd;revno=38
673+quantum-gateway lp:charms/trusty/quantum-gateway;revno=86
674+neutron-api lp:charms/trusty/neutron-api;revno=74
675+neutron-openvswitch lp:charms/trusty/neutron-openvswitch;revno=42
676+swift-proxy lp:charms/trusty/swift-proxy;revno=81
677+swift-storage lp:charms/trusty/swift-storage;revno=57
678+ceilometer lp:charms/trusty/ceilometer;revno=66
679+ceilometer-agent lp:charms/trusty/ceilometer-agent;revno=46
680+heat lp:charms/trusty/heat;revno=33
681+cinder lp:charms/trusty/cinder;revno=72
682+glance lp:charms/trusty/glance;revno=97
683+glance-simplestreams-sync lp:charms/trusty/glance-simplestreams-sync;revno=52
684+keystone lp:charms/trusty/keystone;revno=121
685+nova-cloud-controller lp:charms/trusty/nova-cloud-controller;revno=141
686+openstack-dashboard lp:charms/trusty/openstack-dashboard;revno=50
687+mysql lp:charms/trusty/percona-cluster;revno=47
688+rabbitmq-server lp:charms/trusty/rabbitmq-server;revno=85
689+mongodb lp:charms/trusty/mongodb;revno=67
690+memcached lp:charms/trusty/memcached;revno=67
691+
692+## #landscape:
693+## postgresql lp:charms/trusty/postgresql;revno=111
694+## apache2 lp:charms/trusty/apache2;revno=61
695+## haproxy lp:charms/trusty/haproxy;revno=86
696+## landscape lp:~landscape/landscape-charm/trunk;revno=218
697+
698+#ksplice:
699+#trusty/ksplice lp:~canonical-sysadmins/canonical-is-charms/ksplice;revno=16
700
701=== added file 'specs/bootstack/customer/configs/bootstack-example.yaml'
702--- specs/bootstack/customer/configs/bootstack-example.yaml 1970-01-01 00:00:00 +0000
703+++ specs/bootstack/customer/configs/bootstack-example.yaml 2015-04-27 20:19:34 +0000
704@@ -0,0 +1,735 @@
705+# vim: set sw=2 ts=2 et si:
706+#
707+# HACK: until LP#1324129 is fixed openstack-ha *needs* cmdline manual placement:
708+# juju deploy cs:ubuntu openstack-ha --to 0
709+# juju add-unit openstack-ha --to 1
710+# juju add-unit openstack-ha --to 2
711+# This hack is performed by scripts/deploy-openstack.sh - there is no need for it to be done manually.
712+#
713+# NOTE: You also need to edit ../secrets/bootstack-example-secrets.yaml
714+#
715+# NOTE: to simplify mojo deployment tests/CI, we fully include bootstack-example-secrets.yaml
716+# content here (vs needing /srv/mojo/LOCAL/$MOJO_PROJECT/$MOJO_STAGE/bootstack-example-secrets.yaml),
717+# except for the LDS and KSPLICE secrets
718+
719+###### mojo secrets BEGIN {
720+# pwgen 16:
721+config: &KEYSTONE_ADMIN_PASSWORD OgP2WZ2RITCLM3qG
722+config: &KEYSTONE_ADMIN_TOKEN iE8AtnALUvG729YD
723+config: &MYSQL_ROOT_PASSWORD IbXkv5PehNNBW1kO
724+config: &MYSQL_SST_PASSWORD GPtecPBWLlODg3S2
725+
726+# Below secret must be generated with:
727+# ceph-authtool /dev/stdout --name=mon. --gen-key
728+config: &CEPH_MON_SECRET AQDymD5V4Di7JhAAh/SUQsmApvn4XAGHWs7rCQ==
729+
730+bootstack-stage1-secrets:
731+ services:
732+ mysql:
733+ options:
734+ root-password: *MYSQL_ROOT_PASSWORD
735+ sst-password: *MYSQL_SST_PASSWORD
736+
737+bootstack-stage2-secrets:
738+ services:
739+ ceph:
740+ options:
741+ monitor-secret: *CEPH_MON_SECRET
742+
743+ keystone:
744+ options:
745+ admin-password: *KEYSTONE_ADMIN_PASSWORD
746+ admin-token: *KEYSTONE_ADMIN_TOKEN
747+###### mojo secrets END }
748+
749+
750+# Quick check:
751+# python -c 'import yaml,sys,json;print yaml.dump(yaml.load(sys.stdin), default_flow_style=False)' < configs/bootstack-example.yaml
752+config: &REGION bootstack-example
753+config: &NAGIOS_CONTEXT bootstack-example
754+
755+# uuidgen:
756+config: &CEPH_FSID 7f3e5d9e-b6da-41d9-bfda-7aa4ddf32a52
757+config: &SWIFT_HASH b4e8c139-8404-44e3-bb8e-0e95878f9d96
758+
759+# depending on hardware layout:
760+# NOTE on placement:
761+# _CONST and _TO are mutually exclusive: either TO (same host as an existing service) or a new TAGged host (from MaaS);
762+# edit them on the service stanza itself (to: or constraints:, respectively)
763+
764+# Usual disk arrangement: /dev/sda holds OS, /dev/sdb holds /srv/nova/instances
765+# /srv/nova/instances must be created and mounted prior to deployment - see maas-preseeds.tmpl for details.
766+config: &NOVA_COMPUTE_UNITS 4
767+config: &NOVA_INSTANCES_PATH /srv/nova/instances
768+#config: &NOVA_TO [ ceph=0, ceph=1, ceph=2, ceph-osd=0 ]
769+config: &NOVA_CONST tags=compute
770+
771+config: &SWIFT_ZN_UNITS 1
772+config: &SWIFT_Z1_TO [ nova-compute=0 ]
773+config: &SWIFT_Z2_TO [ nova-compute=1 ]
774+config: &SWIFT_Z3_TO [ nova-compute=2 ]
775+config: &SWIFT_BLOCK_DEVICE /dev/sdd
776+# &SWIFT_BLOCK_DEVICE may be a string containing multiple space-separated device names.
777+
778+config: &CEPH_MON_UNITS 3
779+#config: &CEPH_MON_CONST tags=storage
780+#config: &CEPH_MON_TO [ nova-compute=0, nova-compute=1, nova-compute=2 ]
781+config: &CEPH_MON_TO [ openstack-ha=0, openstack-ha=1, openstack-ha=2 ]
782+config: &CEPH_PUBLIC_NET 172.20.168.0/21
783+config: &CEPH_CLUSTER_NET 172.20.168.0/21
784+
785+config: &CEPH_OSD_UNITS 4
786+# _CONST and _TO are exclusive: either TO same host as existing service, or new TAGged host
787+#config: &CEPH_OSD_CONST tags=storage
788+# config: &CEPH_OSD_TO [ nova-compute=3 ]
789+config: &CEPH_OSD_TO [ nova-compute=0, nova-compute=1, nova-compute=2, nova-compute=3 ]
790+
791+config: &CEPH_OSD_DEVICES /dev/sdc
792+# &CEPH_OSD_DEVICES may be a string containing multiple space-separated device names.
793+
794+# manually set, depends on openstack-ha hosts network interfaces - see maas-interfaces.tmpl for details
795+config: &NEUTRON_GW_EXT_IFACE veth-os-ovs
796+config: &NEUTRON_GW_INT_IFACE br0
797+
798+# manually set, depends on priv VLAN CIDR:
799+config: &SWIFT_VIP '172.20.168.100'
800+config: &CINDER_VIP '172.20.168.101'
801+config: &GLANCE_VIP '172.20.168.102'
802+config: &KEYSTONE_VIP '172.20.168.103'
803+config: &MYSQL_VIP '172.20.168.104'
804+config: &NCC_VIP '172.20.168.105'
805+config: &DASH_VIP '172.20.168.106'
806+config: &NAPI_VIP '172.20.168.107'
807+config: &CEILOMETER_VIP '172.20.168.108'
808+
809+bootstack-stage1:
810+ inherits: bootstack-stage1-secrets
811+ series: trusty
812+ # set below values to every charm where these settings exist
813+ overrides:
814+ nagios_context: *NAGIOS_CONTEXT
815+ corosync_transport: unicast
816+ services:
817+ # Already manually deployed as per above, added here to satisfy 'existence' to juju-deployer
818+ # Openstack and Juju HA boxes
819+ openstack-ha:
820+ #charm: cs:trusty/ubuntu
821+ charm: ubuntu
822+ num_units: 3
823+ # HACK: When LP#1324129 is fixed, we can uncomment below placement
824+ # to: [0, 1, 2]
825+ options:
826+ new-lxc-network: True
827+
828+ ## Infra box: deploy to metal, in advance (services deployed at stage3 to lxc:infra)
829+ ## Infra services are not used in internal deploys
830+ infra:
831+ constraints: tags=infra
832+ #charm: cs:trusty/ubuntu
833+ charm: ubuntu
834+ num_units: 1
835+
836+ ## Compute nodes - hulk smashed onto swift and ceph nodes
837+ nova-compute:
838+ #charm: cs:trusty/nova-compute
839+ #branch: lp:~openstack-charmers/charms/trusty/nova-compute/next
840+ charm: nova-compute
841+ num_units: *NOVA_COMPUTE_UNITS
842+ constraints: *NOVA_CONST
843+ #to: *NOVA_TO
844+ options:
845+ enable-resize: True
846+ enable-live-migration: True
847+ migration-auth-type: ssh
848+ instances-path: *NOVA_INSTANCES_PATH
849+ # XXX: race: this fails if virsh iptables stuff hasn't been set up yet (no conntrack modules loaded)
850+ # sysctl: "{ 'net.nf_conntrack_max': 589824, 'net.netfilter.nf_conntrack_tcp_timeout_established': 86400 }"
851+
852+ ## Neutron Gateway (hulk smashed onto openstack-ha hosts)
853+# TODO(jjo): complete neutron-gw HA RT#78447
854+# neutron-gw-hacluster:
855+# charm: hacluster
856+ neutron-gateway:
857+ #charm: cs:trusty/quantum-gateway
858+ #branch: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
859+ charm: quantum-gateway
860+ # HACK: When LP#1324129 is fixed, we can uncomment below placement
861+ # to: [0, 1, 2]
862+ to: [ openstack-ha=0, openstack-ha=1, openstack-ha=2 ]
863+ num_units: 3
864+ options:
865+ ext-port: *NEUTRON_GW_EXT_IFACE
866+# ha-legacy-mode: True
867+# ha-bindiface: *NEUTRON_GW_INT_IFACE
868+
869+ mysql-hacluster:
870+ #charm: cs:trusty/hacluster
871+ charm: hacluster
872+ mysql:
873+ #charm: cs:trusty/mysql
874+ #branch: lp:~openstack-charmers/charms/trusty/mysql/trunk
875+ charm: mysql
876+ num_units: 1
877+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
878+ options:
879+ dataset-size: 4G
880+ vip: *MYSQL_VIP
881+ #vip_cidr: 21
882+ max-connections: 2500
883+
884+ ## Subordinates
885+ ntp:
886+ #charm: cs:~charmers/trusty/ntp
887+ charm: ntp
888+ options:
889+ source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
890+
891+ ntp-physical:
892+ #charm: cs:~charmers/trusty/ntp
893+ charm: ntp
894+ options:
895+ source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
896+
897+ ntp-smoosh:
898+ #charm: cs:~charmers/trusty/ntp
899+ charm: ntp
900+ options:
901+ source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
902+
903+bootstack-stage2:
904+ inherits: [bootstack-stage1, bootstack-stage2-secrets]
905+ services:
906+ ## Ceph
907+ ceph:
908+ #charm: cs:trusty/ceph
909+ #branch: lp:~openstack-charmers/charms/trusty/ceph/next
910+ charm: ceph
911+ num_units: *CEPH_MON_UNITS
912+ #constraints: *CEPH_MON_CONST
913+ to: *CEPH_MON_TO
914+ options:
915+ monitor-count: *CEPH_MON_UNITS
916+ fsid: *CEPH_FSID
917+ osd-reformat: kill-all-my-data
918+ #osd-devices: *CEPH_OSD_DEVICES
919+ ceph-public-network: *CEPH_PUBLIC_NET
920+ ceph-cluster-network: *CEPH_CLUSTER_NET
921+ ceph-osd:
922+ #charm: cs:trusty/ceph-osd
923+ #branch: lp:~openstack-charmers/charms/trusty/ceph-osd/next
924+ charm: ceph-osd
925+ num_units: *CEPH_OSD_UNITS
926+ #constraints: *CEPH_OSD_CONST
927+ to: *CEPH_OSD_TO
928+ options:
929+ osd-reformat: kill-all-my-data
930+ osd-devices: *CEPH_OSD_DEVICES
931+ ceph-public-network: *CEPH_PUBLIC_NET
932+ ceph-cluster-network: *CEPH_CLUSTER_NET
933+
934+ swift-storage-z1:
935+ #charm: cs:trusty/swift-storage
936+ #branch: lp:~openstack-charmers/charms/trusty/swift-storage/next
937+ charm: swift-storage
938+ num_units: *SWIFT_ZN_UNITS
939+ #constraints: tags=swift-z1
940+ options:
941+ zone: 1
942+ block-device: *SWIFT_BLOCK_DEVICE
943+ overwrite: "true"
944+ to: *SWIFT_Z1_TO
945+ swift-storage-z2:
946+ #charm: cs:trusty/swift-storage
947+ #branch: lp:~openstack-charmers/charms/trusty/swift-storage/next
948+ charm: swift-storage
949+ num_units: *SWIFT_ZN_UNITS
950+ #constraints: tags=swift-z2
951+ options:
952+ zone: 2
953+ block-device: *SWIFT_BLOCK_DEVICE
954+ overwrite: "true"
955+ to: *SWIFT_Z2_TO
956+ swift-storage-z3:
957+ #charm: cs:trusty/swift-storage
958+ #branch: lp:~openstack-charmers/charms/trusty/swift-storage/next
959+ charm: swift-storage
960+ num_units: *SWIFT_ZN_UNITS
961+ #constraints: tags=swift-z3
962+ options:
963+ zone: 3
964+ block-device: *SWIFT_BLOCK_DEVICE
965+ overwrite: "true"
966+ to: *SWIFT_Z3_TO
967+
968+ # Everything else is in containers
969+ ## Stateless services ##
970+ swift-hacluster:
971+ #charm: cs:trusty/hacluster
972+ charm: hacluster
973+ options:
974+ cluster_count: 3
975+ swift-proxy:
976+ num_units: 3
977+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
978+ #charm: cs:trusty/swift-proxy
979+ #branch: lp:~openstack-charmers/charms/trusty/swift-proxy/next
980+ charm: swift-proxy
981+ options:
982+ replicas: 3
983+ #use-https: "no"
984+ zone-assignment: manual
985+ region: *REGION
986+ partition-power: 18
987+ workers: 2
988+ swift-hash: *SWIFT_HASH
989+ vip: *SWIFT_VIP
990+ #vip_cidr: 21
991+ ceilometer-hacluster:
992+ charm: hacluster
993+ options:
994+ cluster_count: 3
995+ ceilometer:
996+ #charm: cs:trusty/ceilometer
997+ #branch: lp:~openstack-charmers/charms/trusty/ceilometer/next
998+ charm: ceilometer
999+ num_units: 3
1000+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1001+ options:
1002+ region: *REGION
1003+ vip: *CEILOMETER_VIP
1004+ ceilometer-agent:
1005+ #charm: cs:trusty/ceilometer-agent
1006+ #branch: lp:~openstack-charmers/charms/trusty/ceilometer-agent/next
1007+ charm: ceilometer-agent
1008+
1009+ heat:
1010+ #charm: cs:trusty/heat
1011+ #branch: lp:~openstack-charmers/charms/trusty/heat/next
1012+ charm: heat
1013+ to: [ "lxc:openstack-ha=0" ]
1014+ options:
1015+ region: *REGION
1016+
1017+ cinder-hacluster:
1018+ #charm: cs:trusty/hacluster
1019+ charm: hacluster
1020+ options:
1021+ cluster_count: 3
1022+ cinder:
1023+ #charm: cs:trusty/cinder
1024+ #branch: lp:~openstack-charmers/charms/trusty/cinder/next
1025+ charm: cinder
1026+ num_units: 3
1027+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1028+ options:
1029+ block-device: "None"
1030+ region: *REGION
1031+ glance-api-version: 2
1032+ ceph-osd-replication-count: 3
1033+ vip: *CINDER_VIP
1034+ #vip_cidr: 21
1035+
1036+ glance-hacluster:
1037+ #charm: cs:trusty/hacluster
1038+ charm: hacluster
1039+ options:
1040+ cluster_count: 3
1041+ glance:
1042+ charm: glance
1043+ num_units: 3
1044+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1045+ options:
1046+ region: *REGION
1047+ vip: *GLANCE_VIP
1048+ #vip_cidr: 21
1049+
1050+ # TODO: HA
1051+ glance-simplestreams-sync:
1052+ #charm: cs:trusty/glance-simplestreams-sync
1053+ charm: glance-simplestreams-sync
1054+ to: [ "lxc:openstack-ha=0" ]
1055+ options:
1056+ cloud_name: *REGION
1057+ region: *REGION
1058+ mirror_list: include-file://{{ spec_dir }}/specs/bootstack/customer/configs/glance-streams.yaml
1059+
1060+ keystone-hacluster:
1061+ #charm: cs:trusty/hacluster
1062+ charm: hacluster
1063+ options:
1064+ cluster_count: 3
1065+ keystone:
1066+ #charm: cs:trusty/keystone
1067+ #branch: lp:~openstack-charmers/charms/trusty/keystone/next
1068+ charm: keystone
1069+ num_units: 3
1070+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1071+ options:
1072+ region: *REGION
1073+ vip: *KEYSTONE_VIP
1074+ #vip_cidr: 21
1075+
1076+ ncc-hacluster:
1077+ #charm: cs:trusty/hacluster
1078+ charm: hacluster
1079+ options:
1080+ cluster_count: 3
1081+ ncc-memcached:
1082+ charm: memcached
1083+ num_units: 3
1084+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1085+ options:
1086+ size: 128
1087+ allow-ufw-ip6-softfail: true
1088+ nova-cloud-controller:
1089+ #charm: cs:trusty/nova-cloud-controller
1090+ #branch: lp:~openstack-charmers/charms/trusty/nova-cloud-controller/next
1091+ charm: nova-cloud-controller
1092+ num_units: 3
1093+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1094+ options:
1095+ # config-flags will likely be overloaded by additional settings like
1096+ # quota_RESOURCENAME=nnn,scheduler_default_filters=...
1097+ config-flags: disk_allocation_ratio=1.0
1098+ cpu-allocation-ratio: 4.0
1099+ ram-allocation-ratio: 0.9
1100+ network-manager: Neutron
1101+ console-access-protocol: novnc
1102+ quantum-security-groups: "yes"
1103+ region: *REGION
1104+ vip: *NCC_VIP
1105+ #vip_cidr: 21
1106+
1107+ osd-hacluster:
1108+ #charm: cs:trusty/hacluster
1109+ charm: hacluster
1110+ options:
1111+ cluster_count: 3
1112+ openstack-dashboard:
1113+ #charm: cs:trusty/openstack-dashboard
1114+ #branch: lp:~openstack-charmers/charms/trusty/openstack-dashboard/next
1115+ #branch: lp:~brad-marshall/charms/trusty/openstack-dashboard/fix-hacluster-config
1116+ charm: openstack-dashboard
1117+ num_units: 3
1118+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1119+ options:
1120+ vip: *DASH_VIP
1121+ #vip_cidr: 21
1122+
1123+ neutron-hacluster:
1124+ charm: hacluster
1125+ options:
1126+ cluster_count: 3
1127+ neutron-api:
1128+ charm: neutron-api
1129+ num_units: 3
1130+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1131+ options:
1132+ vip: *NAPI_VIP
1133+ neutron-security-groups: True
1134+ region: *REGION
1135+ neutron-openvswitch:
1136+ charm: neutron-openvswitch
1137+
1138+ # HACK: 2 rabbitmq-server units here, 3rd added later
1139+ # NOTE rabbitmq-server does its own clustering, no VIP needed
1140+ rabbitmq-server:
1141+ #charm: cs:trusty/rabbitmq-server
1142+ #branch: lp:~jjo/charms/trusty/rabbitmq-server/fix-nodename-to-host-dns-PTR
1143+ # Combined branch: gnuoy's fix for rabbitmq-server creds across relations, and
1144+ # clustering with an invalid hostname
1145+ #branch: lp:~jjo/charms/trusty/rabbitmq-server/gnuoy-lp1355848_jjo-lp1274947
1146+ charm: rabbitmq-server
1147+ num_units: 2
1148+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1149+
1150+ # HACK: 1 mongodb unit here, others added later, as per mongodb README
1151+ # NOTE mongodb does its own clustering, no VIP needed
1152+ mongodb:
1153+ num_units: 1
1154+ to: [ "lxc:openstack-ha=0", "lxc:openstack-ha=1", "lxc:openstack-ha=2" ]
1155+ #charm: cs:trusty/mongodb
1156+ #branch: lp:~jjo/charms/trusty/mongodb/jjo-lp1274947-use_private-address_at_rs-initiate
1157+ charm: mongodb
1158+
1159+ mysql:
1160+ num_units: 2
1161+
1162+ # Keeping hacluster relations here, as they are intra-service, and
1163+ # needed to have service VIPs alive
1164+ relations:
1165+ - [ mysql, mysql-hacluster ]
1166+ - [ keystone, keystone-hacluster ]
1167+ - [ nova-cloud-controller, ncc-hacluster ]
1168+ - [ glance, glance-hacluster ]
1169+ - [ cinder, cinder-hacluster ]
1170+ - [ openstack-dashboard, osd-hacluster ]
1171+ - [ swift-proxy, swift-hacluster ]
1172+ - [ neutron-api, neutron-hacluster ]
1173+ - [ ceilometer, ceilometer-hacluster ]
1174+# - [ neutron-gateway, neutron-gw-hacluster ]
1175+
1176+bootstack-stage2-relations:
1177+ inherits: bootstack-stage2
1178+ relations:
1179+ - [ ceph, ceph-osd ]
1180+ - [ keystone, mysql ]
1181+ - [ nova-cloud-controller, mysql ]
1182+ - [ nova-cloud-controller, rabbitmq-server ]
1183+ - [ nova-cloud-controller, glance ]
1184+ - [ neutron-api, mysql ]
1185+ - [ neutron-api, rabbitmq-server ]
1186+ - [ neutron-api, nova-cloud-controller ]
1187+ - [ neutron-api, neutron-openvswitch ]
1188+ - [ nova-compute, nova-cloud-controller ]
1189+ - [ nova-compute, mysql ]
1190+ - [ nova-compute, "rabbitmq-server:amqp" ]
1191+ - [ nova-compute, glance ]
1192+ - [ nova-compute, ceph ]
1193+ - [ neutron-openvswitch, nova-compute ]
1194+ - [ neutron-openvswitch, rabbitmq-server ]
1195+ - [ glance, mysql ]
1196+ - [ glance, ceph ]
1197+ - [ glance, cinder ]
1198+ - [ cinder, mysql ]
1199+ - [ cinder, rabbitmq-server ]
1200+ - [ cinder, nova-cloud-controller ]
1201+ - [ cinder, ceph ]
1202+ - [ neutron-gateway, mysql ]
1203+ - [ "neutron-gateway:amqp", "rabbitmq-server:amqp" ]
1204+ - [ neutron-gateway, nova-cloud-controller ]
1205+ - [ ntp-smoosh, nova-compute ]
1206+ - [ ntp-physical, neutron-gateway ]
1207+ - [ ntp-physical, infra ]
1208+ - [ swift-proxy, swift-storage-z1 ]
1209+ - [ swift-proxy, swift-storage-z2 ]
1210+ - [ swift-proxy, swift-storage-z3 ]
1212+ - [ ceilometer, rabbitmq-server ]
1213+ - [ ceilometer, mongodb ]
1214+ - [ ceilometer-agent, ceilometer ]
1215+ - [ ceilometer-agent, nova-compute ]
1216+ - [ heat, rabbitmq-server ]
1217+ - [ swift-proxy, keystone ]
1218+ - [ nova-cloud-controller, keystone ]
1219+ - [ neutron-api, keystone ]
1220+ - [ cinder, keystone ]
1221+ - [ glance, keystone ]
1222+ - [ openstack-dashboard, keystone ]
1223+ - [ ceilometer, "keystone:identity-service" ]
1224+ - [ ceilometer, "keystone:identity-notifications" ]
1225+ - [ heat, keystone ]
1226+ - [ heat, mysql ]
1227+ - [ glance-simplestreams-sync, keystone ]
1228+ - [ nova-cloud-controller, ncc-memcached ]
1229+
1230+bootstack-stage2-complete:
1231+ inherits: bootstack-stage2-relations
1232+ services:
1233+ # HACK: add the 3rd units to all
1234+ rabbitmq-server:
1235+ num_units: 3
1236+ mysql:
1237+ num_units: 3
1238+ mongodb:
1239+ num_units: 3
1240+
1241+infra:
1242+ inherits: [bootstack-stage2-complete, infra-secrets]
1243+ services:
1244+ ubuntu-nagios:
1245+ series: trusty
1246+ #charm: cs:trusty/ubuntu
1247+ #charm: ubuntu
1248+ to: lxc:infra
1249+ landscape-postgresql:
1250+ series: trusty
1251+ #charm: cs:trusty/postgresql
1252+ charm: postgresql
1253+ to: lxc:infra
1254+ options:
1255+ extra-packages: python-apt postgresql-contrib postgresql-.*-debversion
1256+ max_connections: 500
1257+ landscape-haproxy:
1258+ series: trusty
1259+ #charm: cs:precise/haproxy
1260+ charm: haproxy
1261+ to: lxc:infra
1262+ options:
1263+ enable_monitoring: True
1264+ monitoring_allowed_cidr: "0.0.0.0/0"
1265+ default_timeouts: "queue 60000, connect 5000, client 120000, server 120000"
1266+ # Don't deploy default haproxy service on port 80
1267+ services: ""
1268+
1269+ landscape-apache:
1270+ series: trusty
1271+ #charm: cs:trusty/apache2
1272+ charm: apache2
1273+ to: lxc:infra
1274+ expose: True
1275+ options:
1276+ enable_modules: proxy proxy_http proxy_balancer rewrite expires headers ssl
1277+ ssl_cert: SELFSIGNED
1278+ ssl_certlocation: apache2.cert
1279+ ssl_keylocation: apache2.key
1280+ nagios_check_http_params: '-H localhost -I 127.0.0.1 -u "/" -e 200,301,302'
1281+
1282+ landscape:
1283+ series: trusty
1284+ charm: landscape
1285+ to: lxc:infra
1286+ #branch: lp:~landscape/landscape-charm/stable
1287+ #branch: lp:~landscape/landscape-charm/trunk
1288+ options:
1289+ service-count: "2"
1290+ services: static appserver pingserver combo-loader async-frontend apiserver package-upload jobhandler package-search cron juju-sync
1291+ repository: http://ppa.launchpad.net/landscape/14.10/ubuntu
1292+ license-file: include-file://../secrets/landscape-license-file
1293+ admin-name: Bootstack Admin
1294+
1295+ landscape-msg:
1296+ series: trusty
1297+ to: lxc:infra
1298+ #branch: lp:~landscape/landscape-charm/stable
1299+ #branch: lp:~landscape/landscape-charm/trunk
1300+ charm: landscape
1301+ options:
1302+ services: msgserver
1303+ repository: http://ppa.launchpad.net/landscape/14.10/ubuntu
1304+ license-file: include-file://../secrets/landscape-license-file
1305+ relations:
1306+ - [landscape, rabbitmq-server]
1307+ - [landscape, landscape-haproxy]
1308+ #- [ntp, landscape]
1309+ - ["landscape:vhost-config", "landscape-apache:vhost-config"]
1310+ - ["landscape:db-admin", "landscape-postgresql:db-admin"]
1311+ - ["landscape-haproxy:website", "landscape-apache:reverseproxy"]
1312+ - [landscape-msg, rabbitmq-server]
1313+ - [landscape-msg, landscape-haproxy]
1314+ - ["landscape-msg:db-admin", "landscape-postgresql:db-admin"]
1315+
1316+ksplice:
1317+ inherits: [bootstack-stage2-complete, ksplice-secrets]
1318+ services:
1319+ ksplice:
1320+ charm: ksplice
1321+ relations:
1322+ - [ksplice, "openstack-ha:juju-info"]
1323+ - [ksplice, "infra:juju-info"]
1324+ - [ksplice, "nova-compute:juju-info"]
1325+
1326+# Below target includes all services + infra (ie final full stage)
1327+services-and-infra:
1328+ inherits: [bootstack-stage2-complete, nrpe, landscape-client, ksplice]
1329+
1330+# BoBstack
1331+config: &BOB_SWIFT_VIP '172.20.1.100'
1332+config: &BOB_CINDER_VIP '172.20.1.101'
1333+config: &BOB_GLANCE_VIP '172.20.1.102'
1334+config: &BOB_KEYSTONE_VIP '172.20.1.103'
1335+config: &BOB_MYSQL_VIP '172.20.1.104'
1336+config: &BOB_NCC_VIP '172.20.1.105'
1337+config: &BOB_DASH_VIP '172.20.1.106'
1338+config: &BOB_NAPI_VIP '172.20.1.107'
1339+config: &BOB_CEILOMETER_VIP '172.20.1.108'
1340+config: &BOB_CEPH_NET '172.20.0.0/23'
1341+bootstack-stage1-overcloud:
1342+ inherits: [bootstack-stage1]
1343+ services:
1344+ neutron-gateway:
1345+ options:
1346+ ext-port: ''
1347+ infra:
1348+ constraints: ''
1349+ nova-compute:
1350+ constraints: ''
1351+ options:
1352+ instances-path: /var/lib/nova/instances
1353+ migration-auth-type: null
1354+ mysql:
1355+ to: null
1356+ options:
1357+ vip: *BOB_MYSQL_VIP
1358+
1359+bootstack-stage2-overcloud:
1360+ inherits: [bootstack-stage1-overcloud, bootstack-stage2]
1361+ services:
1362+ mysql:
1363+ to: null
1364+ num_units: 2
1365+ ceph:
1366+ options:
1367+ ceph-cluster-network: *BOB_CEPH_NET
1368+ ceph-public-network: *BOB_CEPH_NET
1369+ osd-devices: /srv
1370+ ceph-osd:
1371+ options:
1372+ ceph-cluster-network: *BOB_CEPH_NET
1373+ ceph-public-network: *BOB_CEPH_NET
1374+ osd-devices: /srv
1375+ swift-storage-z1:
1376+ options:
1377+ block-device: /swift.img|1G
1378+ swift-storage-z2:
1379+ options:
1380+ block-device: /swift.img|1G
1381+ swift-storage-z3:
1382+ options:
1383+ block-device: /swift.img|1G
1384+ swift-proxy:
1385+ to: null
1386+ options:
1387+ vip: *BOB_SWIFT_VIP
1388+ ceilometer:
1389+ to: null
1390+ options:
1391+ vip: *BOB_CEILOMETER_VIP
1392+ heat:
1393+ to: null
1394+ cinder:
1395+ to: null
1396+ options:
1397+ vip: *BOB_CINDER_VIP
1398+ glance:
1399+ to: null
1400+ options:
1401+ vip: *BOB_GLANCE_VIP
1402+ keystone:
1403+ to: null
1404+ options:
1405+ vip: *BOB_KEYSTONE_VIP
1406+ ncc-memcached:
1407+ to: null
1408+ nova-cloud-controller:
1409+ to: null
1410+ options:
1411+ vip: *BOB_NCC_VIP
1412+ openstack-dashboard:
1413+ to: null
1414+ options:
1415+ vip: *BOB_DASH_VIP
1416+ neutron-api:
1417+ to: null
1418+ options:
1419+ vip: *BOB_NAPI_VIP
1420+ glance-simplestreams-sync:
1421+ to: null
1422+ rabbitmq-server:
1423+ to: null
1424+ mongodb:
1425+ to: null
1426+
1427+bootstack-stage2-complete-overcloud:
1428+ inherits: [bootstack-stage2-overcloud, bootstack-stage2-relations]
1429+ services:
1430+ # HACK: add the 3rd units to all
1431+ rabbitmq-server:
1432+ num_units: 3
1433+ to: null
1434+ mysql:
1435+ num_units: 3
1436+ to: null
1437+ mongodb:
1438+ num_units: 3
1439+ to: null
1440
1441=== added file 'specs/bootstack/customer/configs/bootstack-repo'
1442--- specs/bootstack/customer/configs/bootstack-repo 1970-01-01 00:00:00 +0000
1443+++ specs/bootstack/customer/configs/bootstack-repo 2015-04-27 20:19:34 +0000
1444@@ -0,0 +1,24 @@
1445+ceilometer
1446+ceilometer-agent
1447+ceph
1448+ceph-osd
1449+cinder
1450+glance
1451+glance-simplestreams-sync
1452+hacluster
1453+heat
1454+keystone
1455+memcached
1456+mongodb
1457+mysql
1458+neutron-api
1459+neutron-openvswitch
1460+nova-cloud-controller
1461+nova-compute
1462+ntp
1463+openstack-dashboard
1464+quantum-gateway
1465+rabbitmq-server
1466+swift-proxy
1467+swift-storage
1468+ubuntu
1469
1470=== added file 'specs/bootstack/customer/configs/glance-streams.yaml'
1471--- specs/bootstack/customer/configs/glance-streams.yaml 1970-01-01 00:00:00 +0000
1472+++ specs/bootstack/customer/configs/glance-streams.yaml 2015-04-27 20:19:34 +0000
1473@@ -0,0 +1,7 @@
1474+[{ url: 'http://cloud-images.ubuntu.com/releases/',
1475+ name_prefix: 'ubuntu:released',
1476+ path: 'streams/v1/index.sjson',
1477+ max: 1,
1478+ item_filters: ['arch~(amd64|x86_64)', 'ftype~(disk1.img|disk.img)']
1479+ }
1480+]
1481
1482=== added file 'specs/bootstack/customer/configs/keystone_users.yaml'
1483--- specs/bootstack/customer/configs/keystone_users.yaml 1970-01-01 00:00:00 +0000
1484+++ specs/bootstack/customer/configs/keystone_users.yaml 2015-04-27 20:19:34 +0000
1485@@ -0,0 +1,5 @@
1486+- email: bootstack@dev.null
1487+ password: pass
1488+ roles: [_member_]
1489+ tenant: bootstack_project
1490+ username: bootstack
1491
1492=== added file 'specs/bootstack/customer/configs/network.yaml'
1493--- specs/bootstack/customer/configs/network.yaml 1970-01-01 00:00:00 +0000
1494+++ specs/bootstack/customer/configs/network.yaml 2015-04-27 20:19:34 +0000
1495@@ -0,0 +1,14 @@
1496+gre:
1497+ default_gateway: 172.22.0.1
1498+ start_floating_ip: 172.22.0.2
1499+ end_floating_ip: 172.22.0.62
1500+ external_dns: 8.8.8.8
1501+ external_net_cidr: 172.22.0.0/24
1502+ external_net_name: ext_net
1503+ external_subnet_name: subnet_ext_net
1504+ network_type: gre
1505+ private_net_cidr: 172.20.0.0/23
1506+ private_subnet_name: subnet_bootstack
1507+ private_net_name: net_bootstack
1508+ router_name: router_bootstack
1509+ dvr_enabled: False
1510
1511=== added directory 'specs/bootstack/ha_phased'
1512=== added directory 'specs/bootstack/ha_phased/icehouse'
1513=== added file 'specs/bootstack/ha_phased/icehouse/SPEC_INFO.txt'
1514--- specs/bootstack/ha_phased/icehouse/SPEC_INFO.txt 1970-01-01 00:00:00 +0000
1515+++ specs/bootstack/ha_phased/icehouse/SPEC_INFO.txt 2015-04-27 20:19:34 +0000
1516@@ -0,0 +1,2 @@
1517+This spec performs a phased HA deploy, then runs tests to validate the
1518+deployment.
1519
1520=== added symlink 'specs/bootstack/ha_phased/icehouse/allow_vips_addresses.py'
1521=== target is u'../../../../helper/setup/allow_vips_addresses.py'
1522=== added file 'specs/bootstack/ha_phased/icehouse/bootstack-charms-1504.bzr'
1523--- specs/bootstack/ha_phased/icehouse/bootstack-charms-1504.bzr 1970-01-01 00:00:00 +0000
1524+++ specs/bootstack/ha_phased/icehouse/bootstack-charms-1504.bzr 2015-04-27 20:19:34 +0000
1525@@ -0,0 +1,35 @@
1526+#openstack:
1527+ubuntu lp:charms/trusty/ubuntu;revno=10
1528+ubuntu-nagios lp:charms/trusty/ubuntu;revno=10
1529+ntp lp:charms/trusty/ntp;revno=19
1530+hacluster lp:charms/trusty/hacluster;revno=48
1531+nova-compute lp:charms/trusty/nova-compute;revno=130
1532+ceph lp:charms/trusty/ceph;revno=107
1533+ceph-osd lp:charms/trusty/ceph-osd;revno=42
1534+quantum-gateway lp:charms/trusty/quantum-gateway;revno=108
1535+neutron-api lp:charms/trusty/neutron-api;revno=110
1536+neutron-openvswitch lp:charms/trusty/neutron-openvswitch;revno=64
1537+swift-proxy lp:charms/trusty/swift-proxy;revno=97
1538+swift-storage lp:charms/trusty/swift-storage;revno=66
1539+ceilometer lp:charms/trusty/ceilometer;revno=84
1540+ceilometer-agent lp:charms/trusty/ceilometer-agent;revno=56
1541+heat lp:charms/trusty/heat;revno=33
1542+cinder lp:charms/trusty/cinder;revno=91
1543+glance lp:charms/trusty/glance;revno=113
1544+glance-simplestreams-sync lp:charms/trusty/glance-simplestreams-sync;revno=52
1545+keystone lp:charms/trusty/keystone;revno=148
1546+nova-cloud-controller lp:charms/trusty/nova-cloud-controller;revno=163
1547+openstack-dashboard lp:charms/trusty/openstack-dashboard;revno=64
1548+mysql lp:charms/trusty/percona-cluster;revno=57
1549+rabbitmq-server lp:charms/trusty/rabbitmq-server;revno=99
1550+mongodb lp:charms/trusty/mongodb;revno=68
1551+memcached lp:charms/trusty/memcached;revno=69
1552+
1553+## #landscape:
1554+## postgresql lp:charms/trusty/postgresql
1555+## apache2 lp:charms/trusty/apache2
1556+## haproxy lp:charms/trusty/haproxy
1557+## landscape lp:~landscape/landscape-charm/trunk
1558+
1559+#ksplice:
1560+#trusty/ksplice lp:~canonical-sysadmins/canonical-is-charms/ksplice
1561
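Each non-comment line above maps a local charm directory to a Launchpad branch, optionally
pinned with ;revno=N; a small illustrative parser for the format (the real collect step is
handled by mojo, not this code):

    def parse_collect_line(line):
        line = line.strip()
        if not line or line.startswith('#'):
            return None  # skip blanks and comments
        name, _, branch = line.partition(' ')
        url, _, revno = branch.partition(';revno=')
        return name, url, int(revno) if revno else None

    # parse_collect_line('keystone lp:charms/trusty/keystone;revno=148')
    # -> ('keystone', 'lp:charms/trusty/keystone', 148)
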
1562=== added file 'specs/bootstack/ha_phased/icehouse/bootstack-charms-trunk.bzr'
1563--- specs/bootstack/ha_phased/icehouse/bootstack-charms-trunk.bzr 1970-01-01 00:00:00 +0000
1564+++ specs/bootstack/ha_phased/icehouse/bootstack-charms-trunk.bzr 2015-04-27 20:19:34 +0000
1565@@ -0,0 +1,35 @@
1566+#openstack:
1567+ubuntu lp:charms/trusty/ubuntu
1568+ubuntu-nagios lp:charms/trusty/ubuntu
1569+ntp lp:charms/trusty/ntp
1570+hacluster lp:charms/trusty/hacluster
1571+nova-compute lp:charms/trusty/nova-compute
1572+ceph lp:charms/trusty/ceph
1573+ceph-osd lp:charms/trusty/ceph-osd
1574+quantum-gateway lp:charms/trusty/quantum-gateway
1575+neutron-api lp:charms/trusty/neutron-api
1576+neutron-openvswitch lp:charms/trusty/neutron-openvswitch
1577+swift-proxy lp:charms/trusty/swift-proxy
1578+swift-storage lp:charms/trusty/swift-storage
1579+ceilometer lp:charms/trusty/ceilometer
1580+ceilometer-agent lp:charms/trusty/ceilometer-agent
1581+heat lp:charms/trusty/heat
1582+cinder lp:charms/trusty/cinder
1583+glance lp:charms/trusty/glance
1584+glance-simplestreams-sync lp:charms/trusty/glance-simplestreams-sync
1585+keystone lp:charms/trusty/keystone
1586+nova-cloud-controller lp:charms/trusty/nova-cloud-controller
1587+openstack-dashboard lp:charms/trusty/openstack-dashboard
1588+mysql lp:charms/trusty/percona-cluster
1589+rabbitmq-server lp:charms/trusty/rabbitmq-server
1590+mongodb lp:charms/trusty/mongodb
1591+memcached lp:charms/trusty/memcached
1592+
1593+## #landscape:
1594+## postgresql lp:charms/trusty/postgresql
1595+## apache2 lp:charms/trusty/apache2
1596+## haproxy lp:charms/trusty/haproxy
1597+## landscape lp:~landscape/landscape-charm/trunk
1598+
1599+#ksplice:
1600+#trusty/ksplice lp:~canonical-sysadmins/canonical-is-charms/ksplice
1601
1602=== added symlink 'specs/bootstack/ha_phased/icehouse/bootstack-charms.bzr'
1603=== target is u'../../customer/configs/bootstack-charms.bzr'
1604=== added symlink 'specs/bootstack/ha_phased/icehouse/bootstack-example.yaml'
1605=== target is u'../../customer/configs/bootstack-example.yaml'
1606=== added symlink 'specs/bootstack/ha_phased/icehouse/bootstack-repo'
1607=== target is u'../../customer/configs/bootstack-repo'
1608=== added symlink 'specs/bootstack/ha_phased/icehouse/bootstrap_ha.py'
1609=== target is u'../../../../helper/setup/bootstrap_ha.py'
1610=== added symlink 'specs/bootstack/ha_phased/icehouse/check_juju.py'
1611=== target is u'../../../../helper/tests/check_juju.py'
1612=== added symlink 'specs/bootstack/ha_phased/icehouse/collect-stable-trusty'
1613=== target is u'../../../../helper/collect/collect-stable-trusty'
1614=== added file 'specs/bootstack/ha_phased/icehouse/fix-lxcbr0.sh'
1615--- specs/bootstack/ha_phased/icehouse/fix-lxcbr0.sh 1970-01-01 00:00:00 +0000
1616+++ specs/bootstack/ha_phased/icehouse/fix-lxcbr0.sh 2015-04-27 20:19:34 +0000
1617@@ -0,0 +1,2 @@
1618+#!/bin/bash
1619+juju set openstack-ha new-lxc-network=True
1620
1621=== added symlink 'specs/bootstack/ha_phased/icehouse/image_setup.py'
1622=== target is u'../../../../helper/setup/image_setup.py'
1623=== added symlink 'specs/bootstack/ha_phased/icehouse/images.yaml'
1624=== target is u'../../../../helper/setup/images.yaml'
1625=== added symlink 'specs/bootstack/ha_phased/icehouse/keystone_setup.py'
1626=== target is u'../../../../helper/setup/keystone_setup.py'
1627=== added symlink 'specs/bootstack/ha_phased/icehouse/keystone_users.yaml'
1628=== target is u'../../customer/configs/keystone_users.yaml'
1629=== added file 'specs/bootstack/ha_phased/icehouse/manifest'
1630--- specs/bootstack/ha_phased/icehouse/manifest 1970-01-01 00:00:00 +0000
1631+++ specs/bootstack/ha_phased/icehouse/manifest 2015-04-27 20:19:34 +0000
1632@@ -0,0 +1,53 @@
1633+#verify config=bootstrap_ha.py M0_CONSTRAINTS='bootstrap' M12_CONSTRAINTS='openstack-ha' HA_SERVICENAME='openstack-ha'
1634+verify config=bootstrap_ha.py HA_SERVICENAME='openstack-ha'
1635+
1636+# script config=fix-lxcbr0.sh
1637+# bootstack: Collect the charm branches from Launchpad
1638+# collect config=bootstack-charms.bzr
1639+collect config=bootstack-charms-1504.bzr
1640+
1641+# bootstack: Copy the branches to the deployment directory
1642+repo config=bootstack-repo
1643+
1644+# bootstack: bootstack-example-secrets.yaml
1645+# secrets
1646+
1647+# Use juju deployer to create the units for the deployment
1648+# orig: deploy config=haphase1.yaml delay=0 target=${MOJO_SERIES}-icehouse
1649+# bootstack: deploy config=bootstack-staging.yaml local=bootstack-staging-secrets.yaml delay=0 target=bootstack-stage1
1650+deploy config=bootstack-example.yaml retry=2 delay=0 target=bootstack-stage1-overcloud
1651+
1652+# Check juju statuses are green and that hooks have finished
1653+verify config=check_juju.py
1654+
1655+# # Examine the available networks and set vips for services
1656+# script config=set_vips.py
1657+
1658+# Use juju deployer to add the relations for the deployment
1659+# orig: deploy config=haphase2.yaml delay=0 target=${MOJO_SERIES}-icehouse
1660+# bootstack: deploy config=bootstack-staging.yaml local=bootstack-staging-secrets.yaml delay=0 target=bootstack-stage2
1661+deploy config=bootstack-example.yaml retry=2 delay=0 target=bootstack-stage2-overcloud
1662+
1663+# overcloud BoB needs neutron ports to know about VIPs:
1664+script config=allow_vips_addresses.py
1665+
1666+deploy config=bootstack-example.yaml retry=2 delay=0 target=bootstack-stage2-relations
1667+deploy config=bootstack-example.yaml retry=2 delay=0 target=bootstack-stage2-complete-overcloud
1668+
1669+# Check juju statuses are green and that hooks have finished
1670+verify config=check_juju.py
1671+
1672+# Create sdn on overcloud
1673+script config=network_setup.py
1674+
1675+# Setup glance images
1676+script config=image_setup.py
1677+
1678+# Setup keystone users
1679+script config=keystone_setup.py
1680+
1681+# Create 1 * tiny cirros and 3 * small precise instances on the overcloud
1682+verify config=simple_os_checks.py MACHINES='cirros:m1.tiny:1 precise:m1.small:3' CLOUDINIT_WAIT="300" NET_NAME="net_bootstack"
1683+
1684+# Test obj store by sending and receiving files
1685+verify config=test_obj_store.py
1686
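The MACHINES value in the simple_os_checks.py step above packs image:flavor:count triples
into a single argument; a minimal illustration of how such a value decomposes (the script's
own parsing goes through mojo_utils.parse_mojo_arg):

    machines = 'cirros:m1.tiny:1 precise:m1.small:3'
    for spec in machines.split():
        image_name, flavor_name, count = spec.split(':')
        print(image_name, flavor_name, int(count))
    # cirros m1.tiny 1
    # precise m1.small 3
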
1687=== added symlink 'specs/bootstack/ha_phased/icehouse/network.yaml'
1688=== target is u'../../customer/configs/network.yaml'
1689=== added symlink 'specs/bootstack/ha_phased/icehouse/network_setup.py'
1690=== target is u'../../../../helper/setup/network_setup.py'
1691=== added symlink 'specs/bootstack/ha_phased/icehouse/repo-next-ha'
1692=== target is u'../../../../helper/collect/repo-next-ha'
1693=== added symlink 'specs/bootstack/ha_phased/icehouse/set_vips.py'
1694=== target is u'../../../../helper/setup/set_vips.py'
1695=== added symlink 'specs/bootstack/ha_phased/icehouse/simple_os_checks.py'
1696=== target is u'../../../../helper/tests/simple_os_checks.py'
1697=== added symlink 'specs/bootstack/ha_phased/icehouse/test_obj_store.py'
1698=== target is u'../../../../helper/tests/test_obj_store.py'
1699=== added symlink 'specs/bootstack/ha_phased/icehouse/utils'
1700=== target is u'../../../../helper/utils'
1701=== modified file 'specs/dev/nova_cc_legacy_neutron/simple_os_checks.py'
1702--- specs/dev/nova_cc_legacy_neutron/simple_os_checks.py 2015-01-22 11:31:51 +0000
1703+++ specs/dev/nova_cc_legacy_neutron/simple_os_checks.py 2015-04-27 20:19:34 +0000
1704@@ -13,11 +13,13 @@
1705 parser.add_argument("--active_wait", default=180)
1706 parser.add_argument("--cloudinit_wait", default=180)
1707 parser.add_argument("--ping_wait", default=180)
1708+ parser.add_argument("--net_name", default='private')
1709 options = parser.parse_args()
1710 machines = mojo_utils.parse_mojo_arg(options, 'machines', multiargs=True)
1711 active_wait = int(mojo_utils.parse_mojo_arg(options, 'active_wait'))
1712 cloudinit_wait = int(mojo_utils.parse_mojo_arg(options, 'cloudinit_wait'))
1713 ping_wait = int(mojo_utils.parse_mojo_arg(options, 'ping_wait'))
1714+ net_name = mojo_utils.parse_mojo_arg(options, 'net_name')
1715 logging.basicConfig(level=logging.INFO)
1716 overcloud_novarc = mojo_utils.get_overcloud_auth()
1717 novac = mojo_os_utils.get_nova_client(overcloud_novarc)
1718@@ -30,6 +32,7 @@
1719 flavor_name=flavor_name,
1720 number=int(count),
1721 privkey=priv_key,
1722+ net_name=net_name,
1723 active_wait=active_wait,
1724 cloudinit_wait=cloudinit_wait,
1725 ping_wait=ping_wait)
1726
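With --net_name threaded through, the boot loop above lands instances on an arbitrary
tenant network instead of a hard-coded 'private'; a sketch of the resulting call, matching
the boot_and_test signature changed below (client setup as in the script):

    novac = mojo_os_utils.get_nova_client(mojo_utils.get_overcloud_auth())
    mojo_os_utils.boot_and_test(novac, image_name='cirros',
                                flavor_name='m1.tiny', number=1,
                                privkey=priv_key,  # from the script's keypair setup
                                net_name='net_bootstack',
                                cloudinit_wait=300)
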
1727=== modified file 'specs/dev/nova_cc_legacy_neutron/utils/mojo_os_utils.py'
1728--- specs/dev/nova_cc_legacy_neutron/utils/mojo_os_utils.py 2015-01-22 11:31:51 +0000
1729+++ specs/dev/nova_cc_legacy_neutron/utils/mojo_os_utils.py 2015-04-27 20:19:34 +0000
1730@@ -198,8 +198,8 @@
1731 mojo_utils.juju_set('neutron-gateway', 'ext-port=eth1')
1732
1733
1734-def create_tenant_network(neutron_client, tenant_id, net_name='private',
1735- shared=False, network_type='gre'):
1736+def create_tenant_network(neutron_client, tenant_id, net_name, shared=False,
1737+ network_type='gre'):
1738 networks = neutron_client.list_networks(name=net_name)
1739 if len(networks['networks']) == 0:
1740 logging.info('Creating network: %s',
1741@@ -221,7 +221,7 @@
1742 return network
1743
1744
1745-def create_external_network(neutron_client, tenant_id, net_name='ext_net',
1746+def create_external_network(neutron_client, tenant_id, net_name,
1747 network_type='gre'):
1748 networks = neutron_client.list_networks(name=net_name)
1749 if len(networks['networks']) == 0:
1750@@ -246,8 +246,8 @@
1751 return network
1752
1753
1754-def create_tenant_subnet(neutron_client, tenant_id, network, cidr, dhcp=True,
1755- subnet_name='private_subnet'):
1756+def create_tenant_subnet(neutron_client, tenant_id, network, cidr, subnet_name,
1757+ dhcp=True):
1758 # Create subnet
1759 subnets = neutron_client.list_subnets(name=subnet_name)
1760 if len(subnets['subnets']) == 0:
1761@@ -269,10 +269,9 @@
1762 return subnet
1763
1764
1765-def create_external_subnet(neutron_client, tenant_id, network,
1766+def create_external_subnet(neutron_client, tenant_id, network, subnet_name,
1767 default_gateway=None, cidr=None,
1768- start_floating_ip=None, end_floating_ip=None,
1769- subnet_name='ext_net_subnet'):
1770+ start_floating_ip=None, end_floating_ip=None):
1771 subnets = neutron_client.list_subnets(name=subnet_name)
1772 if len(subnets['subnets']) == 0:
1773 subnet_msg = {
1774@@ -314,20 +313,20 @@
1775 neutron_client.update_subnet(subnet['id'], msg)
1776
1777
1778-def create_provider_router(neutron_client, tenant_id):
1779- routers = neutron_client.list_routers(name='provider-router')
1780+def create_provider_router(neutron_client, tenant_id, router_name):
1781+ routers = neutron_client.list_routers(name=router_name)
1782 if len(routers['routers']) == 0:
1783 logging.info('Creating provider router for external network access')
1784 router_info = {
1785 'router': {
1786- 'name': 'provider-router',
1787+ 'name': router_name,
1788 'tenant_id': tenant_id
1789 }
1790 }
1791 router = neutron_client.create_router(router_info)['router']
1792- logging.info('New router created: %s', (router['id']))
1793+ logging.info('New router "%s" created: %s', router_name, router['id'])
1794 else:
1795- logging.warning('Router provider-router already exists.')
1796+ logging.warning('Router %s already exists.', router_name)
1797 router = routers['routers'][0]
1798 return router
1799
1800@@ -375,10 +374,10 @@
1801 return new_key.private_key
1802
1803
1804-def boot_instance(nova_client, image_name, flavor_name, key_name):
1805+def boot_instance(nova_client, image_name, flavor_name, key_name, net_name):
1806 image = nova_client.images.find(name=image_name)
1807 flavor = nova_client.flavors.find(name=flavor_name)
1808- net = nova_client.networks.find(label="private")
1809+ net = nova_client.networks.find(label=net_name)
1810 nics = [{'net-id': net.id}]
1811 # Obviously time may not produce a unique name
1812 vm_name = time.strftime("%Y%m%d%H%M%S")
1813@@ -506,13 +505,15 @@
1814
1815
1816 def boot_and_test(nova_client, image_name, flavor_name, number, privkey,
1817- active_wait=180, cloudinit_wait=180, ping_wait=180):
1818+ net_name, active_wait=180, cloudinit_wait=180,
1819+ ping_wait=180):
1820 image_config = mojo_utils.get_mojo_config('images.yaml')
1821 for counter in range(number):
1822 instance = boot_instance(nova_client,
1823 image_name=image_name,
1824 flavor_name=flavor_name,
1825- key_name='mojo')
1826+ key_name='mojo',
1827+ net_name=net_name)
1828 wait_for_boot(nova_client, instance.name,
1829 image_config[image_name]['bootstring'], active_wait,
1830 cloudinit_wait)
1831
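With the defaults removed, callers must now name every network object explicitly; a
companion sketch of the tenant-side sequence using the network.yaml values (neutronc and
tenant_id obtained as in the network.yaml sketch above):

    private = mojo_os_utils.create_tenant_network(
        neutronc, tenant_id, 'net_bootstack', network_type='gre')
    mojo_os_utils.create_tenant_subnet(
        neutronc, tenant_id, private, '172.20.0.0/23', 'subnet_bootstack')
    router = mojo_os_utils.create_provider_router(
        neutronc, tenant_id, 'router_bootstack')
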
1832=== modified file 'specs/dev/nova_cc_legacy_neutron/utils/mojo_utils.py'
1833--- specs/dev/nova_cc_legacy_neutron/utils/mojo_utils.py 2015-01-22 11:31:51 +0000
1834+++ specs/dev/nova_cc_legacy_neutron/utils/mojo_utils.py 2015-04-27 20:19:34 +0000
1835@@ -32,7 +32,7 @@
1836 else:
1837 services = [service for service in juju_status['services']]
1838 for svc in services:
1839- if 'units' in juju_status['services'][svc]:
1840+ if 'units' in juju_status['services'].get(svc, {}):
1841 for unit in juju_status['services'][svc]['units']:
1842 units.append(unit)
1843 return units
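The .get(svc, {}) guard means a requested service that is absent from juju status (or has
no 'units' key, e.g. a subordinate) is skipped rather than raising KeyError; a toy
illustration:

    juju_status = {'services': {'keystone': {'units': {'keystone/0': {}}},
                                'ntp': {}}}  # no 'units' key for ntp
    units = []
    for svc in ['keystone', 'ntp', 'mysql']:  # 'mysql' absent from status
        if 'units' in juju_status['services'].get(svc, {}):
            units.extend(juju_status['services'][svc]['units'])
    # units == ['keystone/0']
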
