Merge lp:~openstack-charmers/charms/precise/nova-compute/ha-support into lp:~charmers/charms/precise/nova-compute/trunk

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 44
Proposed branch: lp:~openstack-charmers/charms/precise/nova-compute/ha-support
Merge into: lp:~charmers/charms/precise/nova-compute/trunk
Diff against target: 1428 lines (+982/-86)
10 files modified
config.yaml (+10/-0)
hooks/lib/nova/essex (+3/-2)
hooks/lib/nova/folsom (+4/-5)
hooks/lib/nova/grizzly (+97/-0)
hooks/lib/nova/nova-common (+39/-9)
hooks/lib/openstack-common (+605/-26)
hooks/nova-compute-common (+124/-19)
hooks/nova-compute-relations (+96/-24)
metadata.yaml (+3/-0)
revision (+1/-1)
To merge this branch: bzr merge lp:~openstack-charmers/charms/precise/nova-compute/ha-support
Reviewer: charmers (status: Pending)
Review via email: mp+166340@code.launchpad.net

Description of the change

* Updates for grizzly support.

* Adds SSH-based live migration between compute nodes, brokered by the nova-cloud-controller service (a configuration sketch follows below).

* Various bug fixes and cleanup.
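As a rough illustration only (not part of this proposal): once merged, the new options could be exercised along the following lines. The juju commands and the nova-compute/nova-cloud-controller service names are assumptions for the sketch, and openstack-origin is an assumed name for the install-source option; only enable-live-migration and migration-auth-type are defined by this branch (see the config.yaml hunk in the diff).

    # Assumed: point the charm at the Grizzly Ubuntu Cloud Archive.
    juju set nova-compute openstack-origin="cloud:precise-grizzly"

    # Enable SSH-brokered live migration; public keys and known_hosts are
    # exchanged with nova-cloud-controller over the cloud-compute relation.
    juju set nova-compute enable-live-migration=true migration-auth-type=ssh
    juju add-relation nova-compute nova-cloud-controller

The config.yaml hunk documents sasl (the default) and none for migration-auth-type; the ssh value used above is the one handled by the new configure_migration and compute_joined hooks.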

Preview Diff

1=== modified file 'config.yaml'
2--- config.yaml 2013-03-01 22:10:38 +0000
3+++ config.yaml 2013-05-29 18:05:36 +0000
4@@ -42,6 +42,16 @@
5 default: "yes"
6 type: string
7 description: Whether to run nova-api and nova-network on the compute nodes.
8+ enable-live-migration:
9+ default: False
10+ type: boolean
11+ description: Configure libvirt for live migration.
12+ migration-auth-type:
13+ default: sasl
14+ type: string
15+ description: |
16+ TCP authentication scheme for libvirt live migration. Available options
17+ include sasl or none.
18 # needed if using flatmanager
19 bridge-interface:
20 default: br100
21
22=== modified file 'hooks/lib/nova/essex'
23--- hooks/lib/nova/essex 2012-10-02 23:41:28 +0000
24+++ hooks/lib/nova/essex 2013-05-29 18:05:36 +0000
25@@ -12,7 +12,7 @@
26
27 local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
28 local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
29-
30+ local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
31 [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
32 [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
33 [[ -z "$conf_file" ]] && conf_file=$nova_conf
34@@ -22,7 +22,7 @@
35 pattern="--$key="
36 out=$pattern
37 ;;
38- "$api_conf") match="^$key = "
39+ "$api_conf"|"$libvirtd_conf") match="^$key = "
40 pattern="$match"
41 out="$key = "
42 ;;
43@@ -39,4 +39,5 @@
44 juju-log "$CHARM: Setting new option $key=$value in $conf_file"
45 echo "$out$value" >>$conf_file
46 fi
47+ CONFIG_CHANGED=True
48 }
49
50=== modified file 'hooks/lib/nova/folsom'
51--- hooks/lib/nova/folsom 2012-12-03 11:18:59 +0000
52+++ hooks/lib/nova/folsom 2013-05-29 18:05:36 +0000
53@@ -15,6 +15,7 @@
54 local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
55 local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
56 local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
57+ local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
58
59 [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
60 [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
61@@ -27,11 +28,8 @@
62 pattern="$key="
63 out=$pattern
64 ;;
65- "$api_conf") match="^$key = "
66- pattern="$match"
67- out="$key = "
68- ;;
69- "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
70+ "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
71+ "$libvirtd_conf")
72 match="^$key = "
73 pattern="$match"
74 out="$key = "
75@@ -64,6 +62,7 @@
76 fi
77 ;;
78 esac
79+ CONFIG_CHANGED="True"
80 }
81
82 # Upgrade Helpers
83
84=== added file 'hooks/lib/nova/grizzly'
85--- hooks/lib/nova/grizzly 1970-01-01 00:00:00 +0000
86+++ hooks/lib/nova/grizzly 2013-05-29 18:05:36 +0000
87@@ -0,0 +1,97 @@
88+#!/bin/bash -e
89+
90+# Grizzly-specific functions
91+
92+nova_set_or_update() {
93+ # TODO: This needs to be shared among folsom, grizzly and beyond.
94+ # Set a config option in nova.conf or api-paste.ini, depending
95+ # Defaults to updating nova.conf
96+ local key="$1"
97+ local value="$2"
98+ local conf_file="$3"
99+ local section="${4:-DEFAULT}"
100+
101+ local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
102+ local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
103+ local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
104+ local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
105+ local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
106+ local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
107+
108+ [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
109+ [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
110+
111+ [[ -z "$conf_file" ]] && conf_file=$nova_conf
112+
113+ local pattern=""
114+ case "$conf_file" in
115+ "$nova_conf") match="^$key="
116+ pattern="$key="
117+ out=$pattern
118+ ;;
119+ "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
120+ "$libvirtd_conf")
121+ match="^$key = "
122+ pattern="$match"
123+ out="$key = "
124+ ;;
125+ *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
126+ esac
127+
128+ cat $conf_file | grep "$match$value" >/dev/null &&
129+ juju-log "$CHARM: $key=$value already set in $conf_file" \
130+ && return 0
131+
132+ case $conf_file in
133+ "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
134+ python -c "
135+import ConfigParser
136+config = ConfigParser.RawConfigParser()
137+config.read('$conf_file')
138+config.set('$section','$key','$value')
139+with open('$conf_file', 'wb') as configfile:
140+ config.write(configfile)
141+"
142+ ;;
143+ *)
144+ if cat $conf_file | grep "$match" >/dev/null ; then
145+ juju-log "$CHARM: Updating $conf_file, $key=$value"
146+ sed -i "s|\($pattern\).*|\1$value|" $conf_file
147+ else
148+ juju-log "$CHARM: Setting new option $key=$value in $conf_file"
149+ echo "$out$value" >>$conf_file
150+ fi
151+ ;;
152+ esac
153+ CONFIG_CHANGED="True"
154+}
155+
156+# Upgrade Helpers
157+nova_pre_upgrade() {
158+ # Pre-upgrade helper. Caller should pass the version of OpenStack we are
159+ # upgrading from.
160+ return 0 # Nothing to do here, yet.
161+}
162+
163+nova_post_upgrade() {
164+ # Post-upgrade helper. Caller should pass the version of OpenStack we are
165+ # upgrading from.
166+ local upgrade_from="$1"
167+ juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly."
168+ # We only support folsom -> grizzly, currently.
169+ [[ "$upgrade_from" != "folsom" ]] &&
170+ error_out "Unsupported upgrade: $upgrade_from -> grizzly"
171+
172+ # This may be dangerous, if we are upgrading a number of units at once
173+ # and they all begin the same migration concurrently. Migrate only from
174+ # the cloud controller(s).
175+ if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
176+ juju-log "$CHARM: Migrating nova database."
177+ /usr/bin/nova-manage db sync
178+
179+ # Trigger a service restart on all other nova nodes.
180+ trigger_remote_service_restarts
181+ fi
182+
183+ juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly."
184+}
185
186=== modified file 'hooks/lib/nova/nova-common'
187--- hooks/lib/nova/nova-common 2012-12-06 10:21:10 +0000
188+++ hooks/lib/nova/nova-common 2013-05-29 18:05:36 +0000
189@@ -2,18 +2,21 @@
190
191 # Common utility functions used across all nova charms.
192
193+CONFIG_CHANGED=False
194+HOOKS_DIR="$CHARM_DIR/hooks"
195+
196 # Load the common OpenStack helper library.
197-if [[ -e $CHARM_DIR/lib/openstack-common ]] ; then
198- . $CHARM_DIR/lib/openstack-common
199+if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
200+ . $HOOKS_DIR/lib/openstack-common
201 else
202- juju-log "Couldn't load $CHARM_DIR/lib/opentack-common." && exit 1
203+ juju-log "Couldn't load $HOOKS_DIR/lib/openstack-common." && exit 1
204 fi
205
206 set_or_update() {
207 # Update config flags in nova.conf or api-paste.ini.
208 # Config layout changed in Folsom, so this is now OpenStack release specific.
209 local rel=$(get_os_codename_package "nova-common")
210- . $CHARM_DIR/lib/nova/$rel
211+ . $HOOKS_DIR/lib/nova/$rel
212 nova_set_or_update $@
213 }
214
215@@ -32,9 +35,15 @@
216
217 configure_volume_service() {
218 local svc="$1"
219+ local cur_vers="$(get_os_codename_package "nova-common")"
220 case "$svc" in
221- "cinder") set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
222- "nova-volume") set_or_update "volume_api_class" "nova.volume.api.API" ;;
223+ "cinder")
224+ set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
225+ "nova-volume")
226+ # nova-volume only supported before grizzly.
227+ [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] &&
228+ set_or_update "volume_api_class" "nova.volume.api.API"
229+ ;;
230 *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc"
231 return 1 ;;
232 esac
233@@ -49,11 +58,32 @@
234 ;;
235 "FlatDHCPManager")
236 set_or_update "network_manager" "nova.network.manager.FlatDHCPManager"
237+
238+ if [[ "$CHARM" == "nova-compute" ]] ; then
239+ local flat_interface=$(config-get flat-interface)
240+ local ec2_host=$(relation-get ec2_host)
241+ set_or_update flat_interface "$flat_interface"
242+ set_or_update ec2_dmz_host "$ec2_host"
243+
244+ # Ensure flat_interface has link.
245+ if ip link show $flat_interface >/dev/null 2>&1 ; then
246+ ip link set $flat_interface up
247+ fi
248+
249+ # work around (LP: #1035172)
250+ if [[ -e /dev/vhost-net ]] ; then
251+ iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \
252+ --checksum-fill
253+ fi
254+ fi
255+
256 ;;
257 "Quantum")
258 local local_ip=$(get_ip `unit-get private-address`)
259- [[ -n $local_ip ]] || juju-log "Unable to resolve local IP address" \
260- && exit 1
261+ [[ -n $local_ip ]] || {
262+ juju-log "Unable to resolve local IP address"
263+ exit 1
264+ }
265 set_or_update "network_api_class" "nova.network.quantumv2.api.API"
266 set_or_update "quantum_auth_strategy" "keystone"
267 set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF"
268@@ -101,7 +131,7 @@
269
270 # load the release helper library for pre/post upgrade hooks specific to the
271 # release we are upgrading to.
272- . $CHARM_DIR/lib/nova/$new_rel
273+ . $HOOKS_DIR/lib/nova/$new_rel
274
275 # new release specific pre-upgrade hook
276 nova_pre_upgrade "$orig_os_rel"
277
278=== modified file 'hooks/lib/openstack-common'
279--- hooks/lib/openstack-common 2012-12-06 10:17:41 +0000
280+++ hooks/lib/openstack-common 2013-05-29 18:05:36 +0000
281@@ -20,6 +20,9 @@
282
283 function service_ctl {
284 # control a specific service, or all (as defined by $SERVICES)
285+ # service restarts will only occur depending on global $CONFIG_CHANGED,
286+ # which should be updated in charm's set_or_update().
287+ local config_changed=${CONFIG_CHANGED:-True}
288 if [[ $1 == "all" ]] ; then
289 ctl="$SERVICES"
290 else
291@@ -37,12 +40,21 @@
292 "stop")
293 service_ctl_status $i && service $i stop || return 0 ;;
294 "restart")
295- service_ctl_status $i && service $i restart || service $i start ;;
296+ if [[ "$config_changed" == "True" ]] ; then
297+ service_ctl_status $i && service $i restart || service $i start
298+ fi
299+ ;;
300 esac
301 if [[ $? != 0 ]] ; then
302 juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
303 fi
304 done
305+ # all configs should have been reloaded on restart of all services, reset
306+ # flag if its being used.
307+ if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
308+ [[ "$ctl" == "all" ]]; then
309+ CONFIG_CHANGED="False"
310+ fi
311 }
312
313 function configure_install_source {
314@@ -70,46 +82,62 @@
315 # gpg key id tagged to end of url folloed by a |
316 url=$(echo $src | cut -d'|' -f1)
317 key=$(echo $src | cut -d'|' -f2)
318- if [[ -n "$key" ]] ; then
319- juju-log "$CHARM: Importing repository key: $key"
320- apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
321- juju-log "$CHARM WARN: Could not import key from keyserver: $key"
322- else
323- juju-log "$CHARM No repository key specified"
324- url="$src"
325- fi
326- echo $url > /etc/apt/sources.list.d/juju_deb.list
327+ juju-log "$CHARM: Importing repository key: $key"
328+ apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
329+ juju-log "$CHARM WARN: Could not import key from keyserver: $key"
330+ else
331+ juju-log "$CHARM No repository key specified."
332+ url="$src"
333 fi
334+ echo "$url" > /etc/apt/sources.list.d/juju_deb.list
335 return 0
336 fi
337
338 # Cloud Archive
339 if [[ "${src:0:6}" == "cloud:" ]] ; then
340- local archive_key="5EDB1B62EC4926EA"
341- local rel=$(echo $src | cut -d: -f2)
342- local u_rel=$(echo $rel | cut -d- -f1)
343- local ca_rel=$(echo $rel | cut -d- -f2)
344+
345+ # current os releases supported by the UCA.
346+ local cloud_archive_versions="folsom grizzly"
347+
348+ local ca_rel=$(echo $src | cut -d: -f2)
349+ local u_rel=$(echo $ca_rel | cut -d- -f1)
350+ local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
351
352 [[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
353 error_out "Cannot install from Cloud Archive pocket $src " \
354 "on this Ubuntu version ($DISTRIB_CODENAME)!"
355
356- if [[ "$ca_rel" == "folsom/staging" ]] ; then
357- # cloud archive staging is just a regular PPA.
358- add-apt-repository -y ppa:ubuntu-cloud-archive/folsom-staging
359+ valid_release=""
360+ for rel in $cloud_archive_versions ; do
361+ if [[ "$os_rel" == "$rel" ]] ; then
362+ valid_release=1
363+ juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
364+ fi
365+ done
366+ if [[ -z "$valid_release" ]] ; then
367+ error_out "OpenStack release ($os_rel) not supported by "\
368+ "the Ubuntu Cloud Archive."
369+ fi
370+
371+ # CA staging repos are standard PPAs.
372+ if echo $ca_rel | grep -q "staging" ; then
373+ add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
374 return 0
375 fi
376
377+ # the others are LP-external deb repos.
378 case "$ca_rel" in
379- "folsom"|"folsom/updates") pocket="precise-updates/folsom" ;;
380- "folsom/proposed") pocket="precise-proposed/folsom" ;;
381+ "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
382+ "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
383+ "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
384+ "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
385 *) error_out "Invalid Cloud Archive repo specified: $src"
386 esac
387
388+ apt-get -y install ubuntu-cloud-keyring
389 entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
390 echo "$entry" \
391 >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
392- apt-key adv --keyserver keyserver.ubuntu.com --recv-keys $archive_key
393 return 0
394 fi
395
396@@ -142,15 +170,16 @@
397 case "$ca_rel" in
398 "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
399 codename="folsom" ;;
400- "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzy/staging")
401- codename="grizly" ;;
402+ "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
403+ codename="grizzly" ;;
404 esac
405 fi
406 fi
407
408 # have a guess based on the deb string provided
409- if [[ "${rel:0:3}" == "deb" ]]; then
410- CODENAMES="diablo essex folsom grizzly"
411+ if [[ "${rel:0:3}" == "deb" ]] || \
412+ [[ "${rel:0:3}" == "ppa" ]] ; then
413+ CODENAMES="diablo essex folsom grizzly havana"
414 for cname in $CODENAMES; do
415 if echo $rel | grep -q $cname; then
416 codename=$cname
417@@ -161,12 +190,14 @@
418 }
419
420 get_os_codename_package() {
421- local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }')
422+ local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
423+ pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
424 case "${pkg_vers:0:6}" in
425 "2011.2") echo "diablo" ;;
426 "2012.1") echo "essex" ;;
427 "2012.2") echo "folsom" ;;
428 "2013.1") echo "grizzly" ;;
429+ "2013.2") echo "havana" ;;
430 esac
431 }
432
433@@ -175,7 +206,8 @@
434 "diablo") echo "2011.2" ;;
435 "essex") echo "2012.1" ;;
436 "folsom") echo "2012.2" ;;
437- "grizzly") echo "2012.3" ;;
438+ "grizzly") echo "2013.1" ;;
439+ "havana") echo "2013.2" ;;
440 esac
441 }
442
443@@ -200,3 +232,550 @@
444 pass
445 "
446 }
447+
448+# Common storage routines used by cinder, nova-volume and swift-storage.
449+clean_storage() {
450+ # if configured to overwrite existing storage, we unmount the block-dev
451+ # if mounted and clear any previous pv signatures
452+ local block_dev="$1"
453+ juju-log "Cleaining storage '$block_dev'"
454+ if grep -q "^$block_dev" /proc/mounts ; then
455+ mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
456+ juju-log "Unmounting $block_dev from $mp"
457+ umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
458+ fi
459+ if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
460+ juju-log "Removing existing LVM PV signatures from $block_dev"
461+
462+ # deactivate any volgroups that may be built on this dev
463+ vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
464+ if [[ -n "$vg" ]] ; then
465+ juju-log "Deactivating existing volume group: $vg"
466+ vgchange -an "$vg" ||
467+ error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
468+ fi
469+ echo "yes" | pvremove -ff "$block_dev" ||
470+ error_out "Could not pvremove $block_dev"
471+ else
472+ juju-log "Zapping disk of all GPT and MBR structures"
473+ sgdisk --zap-all $block_dev ||
474+ error_out "Unable to zap $block_dev"
475+ fi
476+}
477+
478+function get_block_device() {
479+ # given a string, return full path to the block device for that
480+ # if input is not a block device, find a loopback device
481+ local input="$1"
482+
483+ case "$input" in
484+ /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
485+ echo "$input"; return 0;;
486+ /*) :;;
487+ *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
488+ echo "/dev/$input"; return 0;;
489+ esac
490+
491+ # this represents a file
492+ # support "/path/to/file|5G"
493+ local fpath size oifs="$IFS"
494+ if [ "${input#*|}" != "${input}" ]; then
495+ size=${input##*|}
496+ fpath=${input%|*}
497+ else
498+ fpath=${input}
499+ size=5G
500+ fi
501+
502+ ## loop devices are not namespaced. This is bad for containers.
503+ ## it means that the output of 'losetup' may have the given $fpath
504+ ## in it, but that may not represent this containers $fpath, but
505+ ## another containers. To address that, we really need to
506+ ## allow some uniq container-id to be expanded within path.
507+ ## TODO: find a unique container-id that will be consistent for
508+ ## this container throughout its lifetime and expand it
509+ ## in the fpath.
510+ # fpath=${fpath//%{id}/$THAT_ID}
511+
512+ local found=""
513+ # parse through 'losetup -a' output, looking for this file
514+ # output is expected to look like:
515+ # /dev/loop0: [0807]:961814 (/tmp/my.img)
516+ found=$(losetup -a |
517+ awk 'BEGIN { found=0; }
518+ $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
519+ END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
520+ f="($fpath)")
521+
522+ if [ $? -ne 0 ]; then
523+ echo "multiple devices found for $fpath: $found" 1>&2
524+ return 1;
525+ fi
526+
527+ [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; }
528+
529+ if [ -n "$found" ]; then
530+ echo "confused, $found is not a block device for $fpath";
531+ return 1;
532+ fi
533+
534+ # no existing device was found, create one
535+ mkdir -p "${fpath%/*}"
536+ truncate --size "$size" "$fpath" ||
537+ { echo "failed to create $fpath of size $size"; return 1; }
538+
539+ found=$(losetup --find --show "$fpath") ||
540+ { echo "failed to setup loop device for $fpath" 1>&2; return 1; }
541+
542+ echo "$found"
543+ return 0
544+}
545+
546+HAPROXY_CFG=/etc/haproxy/haproxy.cfg
547+HAPROXY_DEFAULT=/etc/default/haproxy
548+##########################################################################
549+# Description: Configures HAProxy services for Openstack API's
550+# Parameters:
551+# Space delimited list of service:port:mode combinations for which
552+# haproxy service configuration should be generated for. The function
553+# assumes the name of the peer relation is 'cluster' and that every
554+# service unit in the peer relation is running the same services.
555+#
556+# Services that do not specify :mode in parameter will default to http.
557+#
558+# Example
559+# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
560+##########################################################################
561+configure_haproxy() {
562+ local address=`unit-get private-address`
563+ local name=${JUJU_UNIT_NAME////-}
564+ cat > $HAPROXY_CFG << EOF
565+global
566+ log 127.0.0.1 local0
567+ log 127.0.0.1 local1 notice
568+ maxconn 20000
569+ user haproxy
570+ group haproxy
571+ spread-checks 0
572+
573+defaults
574+ log global
575+ mode http
576+ option httplog
577+ option dontlognull
578+ retries 3
579+ timeout queue 1000
580+ timeout connect 1000
581+ timeout client 30000
582+ timeout server 30000
583+
584+listen stats :8888
585+ mode http
586+ stats enable
587+ stats hide-version
588+ stats realm Haproxy\ Statistics
589+ stats uri /
590+ stats auth admin:password
591+
592+EOF
593+ for service in $@; do
594+ local service_name=$(echo $service | cut -d : -f 1)
595+ local haproxy_listen_port=$(echo $service | cut -d : -f 2)
596+ local api_listen_port=$(echo $service | cut -d : -f 3)
597+ local mode=$(echo $service | cut -d : -f 4)
598+ [[ -z "$mode" ]] && mode="http"
599+ juju-log "Adding haproxy configuration entry for $service "\
600+ "($haproxy_listen_port -> $api_listen_port)"
601+ cat >> $HAPROXY_CFG << EOF
602+listen $service_name 0.0.0.0:$haproxy_listen_port
603+ balance roundrobin
604+ mode $mode
605+ option ${mode}log
606+ server $name $address:$api_listen_port check
607+EOF
608+ local r_id=""
609+ local unit=""
610+ for r_id in `relation-ids cluster`; do
611+ for unit in `relation-list -r $r_id`; do
612+ local unit_name=${unit////-}
613+ local unit_address=`relation-get -r $r_id private-address $unit`
614+ if [ -n "$unit_address" ]; then
615+ echo " server $unit_name $unit_address:$api_listen_port check" \
616+ >> $HAPROXY_CFG
617+ fi
618+ done
619+ done
620+ done
621+ echo "ENABLED=1" > $HAPROXY_DEFAULT
622+ service haproxy restart
623+}
624+
625+##########################################################################
626+# Description: Query HA interface to determine if cluster is configured
627+# Returns: 0 if configured, 1 if not configured
628+##########################################################################
629+is_clustered() {
630+ local r_id=""
631+ local unit=""
632+ for r_id in $(relation-ids ha); do
633+ if [ -n "$r_id" ]; then
634+ for unit in $(relation-list -r $r_id); do
635+ clustered=$(relation-get -r $r_id clustered $unit)
636+ if [ -n "$clustered" ]; then
637+ juju-log "Unit is haclustered"
638+ return 0
639+ fi
640+ done
641+ fi
642+ done
643+ juju-log "Unit is not haclustered"
644+ return 1
645+}
646+
647+##########################################################################
648+# Description: Return a list of all peers in cluster relations
649+##########################################################################
650+peer_units() {
651+ local peers=""
652+ local r_id=""
653+ for r_id in $(relation-ids cluster); do
654+ peers="$peers $(relation-list -r $r_id)"
655+ done
656+ echo $peers
657+}
658+
659+##########################################################################
660+# Description: Determines whether the current unit is the oldest of all
661+# its peers - supports partial leader election
662+# Returns: 0 if oldest, 1 if not
663+##########################################################################
664+oldest_peer() {
665+ peers=$1
666+ local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
667+ for peer in $peers; do
668+ echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
669+ local r_unit_no=$(echo $peer | cut -d / -f 2)
670+ if (($r_unit_no<$l_unit_no)); then
671+ juju-log "Not oldest peer; deferring"
672+ return 1
673+ fi
674+ done
675+ juju-log "Oldest peer; might take charge?"
676+ return 0
677+}
678+
679+##########################################################################
680+# Description: Determines whether the current service units is the
681+# leader within a) a cluster of its peers or b) across a
682+# set of unclustered peers.
683+# Parameters: CRM resource to check ownership of if clustered
684+# Returns: 0 if leader, 1 if not
685+##########################################################################
686+eligible_leader() {
687+ if is_clustered; then
688+ if ! is_leader $1; then
689+ juju-log 'Deferring action to CRM leader'
690+ return 1
691+ fi
692+ else
693+ peers=$(peer_units)
694+ if [ -n "$peers" ] && ! oldest_peer "$peers"; then
695+ juju-log 'Deferring action to oldest service unit.'
696+ return 1
697+ fi
698+ fi
699+ return 0
700+}
701+
702+##########################################################################
703+# Description: Query Cluster peer interface to see if peered
704+# Returns: 0 if peered, 1 if not peered
705+##########################################################################
706+is_peered() {
707+ local r_id=$(relation-ids cluster)
708+ if [ -n "$r_id" ]; then
709+ if [ -n "$(relation-list -r $r_id)" ]; then
710+ juju-log "Unit peered"
711+ return 0
712+ fi
713+ fi
714+ juju-log "Unit not peered"
715+ return 1
716+}
717+
718+##########################################################################
719+# Description: Determines whether host is owner of clustered services
720+# Parameters: Name of CRM resource to check ownership of
721+# Returns: 0 if leader, 1 if not leader
722+##########################################################################
723+is_leader() {
724+ hostname=`hostname`
725+ if [ -x /usr/sbin/crm ]; then
726+ if crm resource show $1 | grep -q $hostname; then
727+ juju-log "$hostname is cluster leader."
728+ return 0
729+ fi
730+ fi
731+ juju-log "$hostname is not cluster leader."
732+ return 1
733+}
734+
735+##########################################################################
736+# Description: Determines whether enough data has been provided in
737+# configuration or relation data to configure HTTPS.
738+# Parameters: None
739+# Returns: 0 if HTTPS can be configured, 1 if not.
740+##########################################################################
741+https() {
742+ local r_id=""
743+ if [[ -n "$(config-get ssl_cert)" ]] &&
744+ [[ -n "$(config-get ssl_key)" ]] ; then
745+ return 0
746+ fi
747+ for r_id in $(relation-ids identity-service) ; do
748+ for unit in $(relation-list -r $r_id) ; do
749+ if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
750+ [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
751+ [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
752+ [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
753+ return 0
754+ fi
755+ done
756+ done
757+ return 1
758+}
759+
760+##########################################################################
761+# Description: For a given number of port mappings, configures apache2
762+# HTTPS local reverse proxying using certificates and keys provided in
763+# either configuration data (preferred) or relation data. Assumes ports
764+# are not in use (calling charm should ensure that).
765+# Parameters: Variable number of proxy port mappings as
766+# $internal:$external.
767+# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
768+##########################################################################
769+enable_https() {
770+ local port_maps="$@"
771+ local http_restart=""
772+ juju-log "Enabling HTTPS for port mappings: $port_maps."
773+
774+ # allow overriding of keystone provided certs with those set manually
775+ # in config.
776+ local cert=$(config-get ssl_cert)
777+ local key=$(config-get ssl_key)
778+ local ca_cert=""
779+ if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
780+ juju-log "Inspecting identity-service relations for SSL certificate."
781+ local r_id=""
782+ cert=""
783+ key=""
784+ ca_cert=""
785+ for r_id in $(relation-ids identity-service) ; do
786+ for unit in $(relation-list -r $r_id) ; do
787+ [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
788+ [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
789+ [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
790+ done
791+ done
792+ [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
793+ [[ -n "$key" ]] && key=$(echo $key | base64 -di)
794+ [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
795+ else
796+ juju-log "Using SSL certificate provided in service config."
797+ fi
798+
799+ [[ -z "$cert" ]] || [[ -z "$key" ]] &&
800+ juju-log "Expected but could not find SSL certificate data, not "\
801+ "configuring HTTPS!" && return 1
802+
803+ apt-get -y install apache2
804+ a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
805+ http_restart=1
806+
807+ mkdir -p /etc/apache2/ssl/$CHARM
808+ echo "$cert" >/etc/apache2/ssl/$CHARM/cert
809+ echo "$key" >/etc/apache2/ssl/$CHARM/key
810+ if [[ -n "$ca_cert" ]] ; then
811+ juju-log "Installing Keystone supplied CA cert."
812+ echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
813+ update-ca-certificates --fresh
814+
815+ # XXX TODO: Find a better way of exporting this?
816+ if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
817+ [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
818+ rm -rf /var/www/keystone_juju_ca_cert.crt
819+ ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
820+ /var/www/keystone_juju_ca_cert.crt
821+ fi
822+
823+ fi
824+ for port_map in $port_maps ; do
825+ local ext_port=$(echo $port_map | cut -d: -f1)
826+ local int_port=$(echo $port_map | cut -d: -f2)
827+ juju-log "Creating apache2 reverse proxy vhost for $port_map."
828+ cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
829+Listen $ext_port
830+NameVirtualHost *:$ext_port
831+<VirtualHost *:$ext_port>
832+ ServerName $(unit-get private-address)
833+ SSLEngine on
834+ SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
835+ SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
836+ ProxyPass / http://localhost:$int_port/
837+ ProxyPassReverse / http://localhost:$int_port/
838+ ProxyPreserveHost on
839+</VirtualHost>
840+<Proxy *>
841+ Order deny,allow
842+ Allow from all
843+</Proxy>
844+<Location />
845+ Order allow,deny
846+ Allow from all
847+</Location>
848+END
849+ a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
850+ http_restart=1
851+ done
852+ if [[ -n "$http_restart" ]] ; then
853+ service apache2 restart
854+ fi
855+}
856+
857+##########################################################################
858+# Description: Ensure HTTPS reverse proxying is disabled for given port
859+# mappings.
860+# Parameters: Variable number of proxy port mappings as
861+# $internal:$external.
862+# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
863+##########################################################################
864+disable_https() {
865+ local port_maps="$@"
866+ local http_restart=""
867+ juju-log "Ensuring HTTPS disabled for $port_maps."
868+ ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
869+ for port_map in $port_maps ; do
870+ local ext_port=$(echo $port_map | cut -d: -f1)
871+ local int_port=$(echo $port_map | cut -d: -f2)
872+ if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
873+ juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
874+ a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
875+ http_restart=1
876+ fi
877+ done
878+ if [[ -n "$http_restart" ]] ; then
879+ service apache2 restart
880+ fi
881+}
882+
883+
884+##########################################################################
885+# Description: Ensures HTTPS is either enabled or disabled for given port
886+# mapping.
887+# Parameters: Variable number of proxy port mappings as
888+# $internal:$external.
889+# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
890+##########################################################################
891+setup_https() {
892+ # configure https via apache reverse proxying either
893+ # using certs provided by config or keystone.
894+ [[ -z "$CHARM" ]] &&
895+ error_out "setup_https(): CHARM not set."
896+ if ! https ; then
897+ disable_https $@
898+ else
899+ enable_https $@
900+ fi
901+}
902+
903+##########################################################################
904+# Description: Determine correct API server listening port based on
905+# existence of HTTPS reverse proxy and/or haproxy.
906+# Parameters: The standard public port for given service.
907+# Returns: The correct listening port for API service.
908+##########################################################################
909+determine_api_port() {
910+ local public_port="$1"
911+ local i=0
912+ ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1]
913+ https >/dev/null 2>&1 && i=$[$i + 1]
914+ echo $[$public_port - $[$i * 10]]
915+}
916+
917+##########################################################################
918+# Description: Determine correct proxy listening port based on public IP +
919+# existence of HTTPS reverse proxy.
920+# Parameters: The standard public port for given service.
921+# Returns: The correct listening port for haproxy service public address.
922+##########################################################################
923+determine_haproxy_port() {
924+ local public_port="$1"
925+ local i=0
926+ https >/dev/null 2>&1 && i=$[$i + 1]
927+ echo $[$public_port - $[$i * 10]]
928+}
929+
930+##########################################################################
931+# Description: Print the value for a given config option in an OpenStack
932+# .ini style configuration file.
933+# Parameters: File path, option to retrieve, optional
934+# section name (default=DEFAULT)
935+# Returns: Prints value if set, prints nothing otherwise.
936+##########################################################################
937+local_config_get() {
938+ # return config values set in openstack .ini config files.
939+ # default placeholders starting (eg, %AUTH_HOST%) treated as
940+ # unset values.
941+ local file="$1"
942+ local option="$2"
943+ local section="$3"
944+ [[ -z "$section" ]] && section="DEFAULT"
945+ python -c "
946+import ConfigParser
947+config = ConfigParser.RawConfigParser()
948+config.read('$file')
949+try:
950+ value = config.get('$section', '$option')
951+except:
952+ print ''
953+ exit(0)
954+if value.startswith('%'): exit(0)
955+print value
956+"
957+}
958+
959+##########################################################################
960+# Description: Creates an rc file exporting environment variables to a
961+# script_path local to the charm's installed directory.
962+# Any charm scripts run outside the juju hook environment can source this
963+# scriptrc to obtain updated config information necessary to perform health
964+# checks or service changes
965+#
966+# Parameters:
967+# An array of '=' delimited ENV_VAR:value combinations to export.
968+# If optional script_path key is not provided in the array, script_path
969+# defaults to scripts/scriptrc
970+##########################################################################
971+function save_script_rc {
972+ if [ ! -n "$JUJU_UNIT_NAME" ]; then
973+ echo "Error: Missing JUJU_UNIT_NAME environment variable"
974+ exit 1
975+ fi
976+ # our default unit_path
977+ unit_path="$CHARM_DIR/scripts/scriptrc"
978+ echo $unit_path
979+ tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"
980+
981+ echo "#!/bin/bash" > $tmp_rc
982+ for env_var in "${@}"
983+ do
984+ if `echo $env_var | grep -q script_path`; then
985+ # well then we need to reset the new unit-local script path
986+ unit_path="$CHARM_DIR/${env_var/script_path=/}"
987+ else
988+ echo "export $env_var" >> $tmp_rc
989+ fi
990+ done
991+ chmod 755 $tmp_rc
992+ mv $tmp_rc $unit_path
993+}
994
995=== modified file 'hooks/nova-compute-common'
996--- hooks/nova-compute-common 2013-03-04 19:58:18 +0000
997+++ hooks/nova-compute-common 2013-05-29 18:05:36 +0000
998@@ -7,6 +7,8 @@
999 NOVA_CONF=$(config-get nova-config)
1000 API_CONF="/etc/nova/api-paste.ini"
1001 QUANTUM_CONF="/etc/quantum/quantum.conf"
1002+LIBVIRTD_CONF="/etc/libvirt/libvirtd.conf"
1003+HOOKS_DIR="$CHARM_DIR/hooks"
1004 MULTI_HOST=$(config-get multi-host)
1005
1006 if [ -f /etc/nova/nm.conf ]; then
1007@@ -35,10 +37,10 @@
1008 ;;
1009 esac
1010
1011-if [[ -e $CHARM_DIR/lib/nova/nova-common ]] ; then
1012- . $CHARM_DIR/lib/nova/nova-common
1013+if [[ -e $HOOKS_DIR/lib/nova/nova-common ]] ; then
1014+ . $HOOKS_DIR/lib/nova/nova-common
1015 else
1016- juju-log "$CHARM: Couldn't load $CHARM_DIR/lib/nova-common" && exit 1
1017+ juju-log "$CHARM: Couldn't load $HOOKS_DIR/lib/nova-common" && exit 1
1018 fi
1019
1020 determine_compute_package() {
1021@@ -52,7 +54,7 @@
1022 "xen") compute_pkg="nova-compute-xen";;
1023 "uml") compute_pkg="nova-compute-uml";;
1024 "lxc") compute_pkg="nova-compute-lxc";;
1025- *) error_out" ERROR: Unsupported virt_type=$virt_type";;
1026+ *) error_out "ERROR: Unsupported virt_type=$virt_type";;
1027 esac
1028 echo "$compute_pkg"
1029 }
1030@@ -98,17 +100,13 @@
1031 exit 1
1032 }
1033
1034- # Store the network manager and quantum plugin
1035- # for use in later hook invocations
1036- [[ -n $net_manager ]] && echo $net_manager > /etc/nova/nm.conf
1037- [[ -n $quantum_plugin ]] && echo $quantum_plugin > /etc/nova/quantum_plugin.conf
1038-
1039 case $net_manager in
1040 "FlatManager"|"FlatDHCPManager")
1041 if [[ "$MULTI_HOST" == "yes" ]] ; then
1042 apt-get -y install nova-api nova-network
1043 SERVICES="$SERVICES nova-api nova-network"
1044 fi
1045+ [[ -n $net_manager ]] && echo $net_manager > /etc/nova/nm.conf
1046 ;;&
1047 "FlatManager")
1048 local bridge_ip=$(config-get bridge-ip)
1049@@ -129,18 +127,39 @@
1050 set_or_update ec2_dmz_host $ec2_host
1051 ;;
1052 "Quantum")
1053- local keystone_host=$(relation-get keystone_host)
1054- [[ -z $keystone_host ]] && juju-log "nova-compute: Missing keystone host" \
1055- && exit 0
1056+ local keystone_host="$(relation-get keystone_host)"
1057+ local auth_port="$(relation-get auth_port)"
1058+ local quantum_url="$(relation-get quantum_url)"
1059+ local quantum_admin_tenant_name="$(relation-get service_tenant)"
1060+ local quantum_admin_username="$(relation-get service_username)"
1061+ local quantum_admin_password="$(relation-get service_password)"
1062+
1063+ # might end up here before nova-c-c has processed keystone hooks
1064+ [[ -z "$keystone_host" ]] ||
1065+ [[ -z "$auth_port" ]] ||
1066+ [[ -z "$quantum_url" ]] ||
1067+ [[ -z "$quantum_admin_tenant_name" ]] ||
1068+ [[ -z "$quantum_admin_username" ]] ||
1069+ [[ -z "$quantum_admin_password" ]] &&
1070+ juju-log "nova-compute: Missing required data for Quantum config." &&
1071+ exit 0
1072 set_or_update "network_api_class" "nova.network.quantumv2.api.API"
1073 set_or_update "quantum_auth_strategy" "keystone"
1074- set_or_update "quantum_url" "http://$(relation-get quantum_host):9696"
1075- set_or_update "quantum_admin_tenant_name" "$(relation-get service_tenant)"
1076- set_or_update "quantum_admin_username" "$(relation-get service_username)"
1077- set_or_update "quantum_admin_password" "$(relation-get service_password)"
1078+ set_or_update "quantum_url" "$quantum_url"
1079+ set_or_update "quantum_admin_tenant_name" "$quantum_admin_tenant_name"
1080+ set_or_update "quantum_admin_username" "$quantum_admin_username"
1081+ set_or_update "quantum_admin_password" "$quantum_admin_password"
1082 set_or_update "quantum_admin_auth_url" \
1083- "http://$(relation-get keystone_host):$(relation-get auth_port)/v2.0"
1084- set_or_update "force_config_drive" "True"
1085+ "http://$keystone_host:$auth_port/v2.0"
1086+
1087+ local cur=$(get_os_codename_package "nova-common")
1088+ if dpkg --compare-versions $(get_os_version_codename $cur) gt '2012.2'; then
1089+ # Grizzly onwards supports metadata proxy so forcing use of config
1090+ # drive is not required.
1091+ set_or_update "force_config_drive" "False"
1092+ else
1093+ set_or_update "force_config_drive" "True"
1094+ fi
1095 case $quantum_plugin in
1096 "ovs")
1097 apt-get -y install openvswitch-datapath-dkms
1098@@ -157,6 +176,8 @@
1099 ;;
1100 esac
1101 set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF"
1102+ [[ -n $net_manager ]] && echo $net_manager > /etc/nova/nm.conf
1103+ [[ -n $quantum_plugin ]] && echo $quantum_plugin > /etc/nova/quantum_plugin.conf
1104 ;;
1105 *) echo "ERROR: Invalid network manager $1" && exit 1 ;;
1106 esac
1107@@ -170,6 +191,83 @@
1108 fi
1109 }
1110
1111+function initialize_ssh_keys {
1112+ # generate ssh keypair for root if one does not exist or
1113+ # the pair is not complete.
1114+ local pub="/root/.ssh/id_rsa.pub"
1115+ local priv="/root/.ssh/id_rsa"
1116+ if [[ -e $pub ]] &&
1117+ [[ -e $priv ]] ; then
1118+ juju-log "$CHARM: SSH credentials already exist for root."
1119+ return 0
1120+ fi
1121+ juju-log "$CHARM: Initializing new SSH key pair for live migration."
1122+ [[ -e $pub ]] && mv $pub $pub.$(date +"%s")
1123+ [[ -e $priv ]] && mv $priv $priv.$(date +"%s")
1124+ local keyname=$(echo $JUJU_UNIT_NAME | sed -e 's,/,-,g')
1125+ echo -e "\n" | ssh-keygen -C "$keyname" -N ""
1126+}
1127+
1128+function libvirt_tcp_listening {
1129+ # toggle libvirtd's tcp listening in both /etc/default/libvirt-bin
1130+ # and /etc/libvirt/libvirtd.conf.
1131+ local toggle="$1"
1132+ juju-log "$CHARM: Configuring libvirt tcp listening: $toggle."
1133+ local cur_opts=$(grep "^libvirtd_opts" /etc/default/libvirt-bin |
1134+ cut -d= -f2 | sed -e 's/\"//g')
1135+ local new_opts=""
1136+
1137+ if [[ "$toggle" == "on" ]] ; then
1138+ if [[ -z "$cur_opts" ]] ; then
1139+ echo "libvirtd_opts=\"-d -l\"" >>/etc/default/libvirt-bin
1140+ elif ! echo "$cur_opts" | grep -q "\-l" ; then
1141+ new_opts="$cur_opts -l"
1142+ sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin
1143+ fi
1144+ set_or_update "listen_tcp" 1 $LIBVIRTD_CONF
1145+ elif [[ "$toggle" == "off" ]] ; then
1146+ if echo "$cur_opts" | grep -q "\-l" ; then
1147+ new_opts=$(echo $cur_opts | sed -e 's/\-l//g')
1148+ fi
1149+ set_or_update "listen_tcp" 0 $LIBVIRTD_CONF
1150+ fi
1151+
1152+ [[ -n "$new_opts" ]] &&
1153+ sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin
1154+
1155+ return 0
1156+}
1157+
1158+
1159+function configure_migration {
1160+ local enable_migration=$(config-get enable-live-migration)
1161+
1162+ if [[ "$enable_migration" != "True" ]] &&
1163+ [[ "$enable_migraiton" != "true" ]] ; then
1164+ libvirt_tcp_listening "off"
1165+ return $?
1166+ fi
1167+
1168+ libvirt_tcp_listening "on"
1169+
1170+ case "$(config-get migration-auth-type)" in
1171+ "none"|"None")
1172+ set_or_update "listen_tls" 0 $LIBVIRTD_CONF
1173+ set_or_update "auth_tcp" "\"none\"" $LIBVIRTD_CONF
1174+ ;;
1175+ "ssh")
1176+ set_or_update "listen_tls" 0 $LIBVIRTD_CONF
1177+ set_or_update "live_migration_uri" "qemu+ssh://%s/system" $NOVA_CONF
1178+ initialize_ssh_keys
1179+ # check in with nova-c-c and register our new key.
1180+ for id in $(relation-ids cloud-compute) ; do
1181+ compute_joined $id
1182+ done
1183+ service_ctl nova-compute restart ;;
1184+ "sasl") return 0 ;;
1185+ esac
1186+}
1187+
1188 function configure_libvirt {
1189 cat > /etc/libvirt/qemu.conf << EOF
1190 # File installed by Juju nova-compute charm
1191@@ -180,5 +278,12 @@
1192 "/dev/rtc", "/dev/hpet", "/dev/net/tun",
1193 ]
1194 EOF
1195- service libvirt-bin reload
1196+ configure_migration
1197+ service libvirt-bin restart
1198+}
1199+
1200+function migration_enabled {
1201+ local migration="$(config-get enable-live-migration)"
1202+ [[ "$migration" == "true" ]] || [[ "$migration" == "True" ]] && return 0
1203+ return 1
1204 }
1205
1206=== modified file 'hooks/nova-compute-relations'
1207--- hooks/nova-compute-relations 2013-03-04 19:58:18 +0000
1208+++ hooks/nova-compute-relations 2013-05-29 18:05:36 +0000
1209@@ -1,11 +1,11 @@
1210 #!/bin/bash -e
1211-CHARM_DIR=$(dirname $0)
1212+HOOKS_DIR="$CHARM_DIR/hooks"
1213 ARG0=${0##*/}
1214
1215-if [[ -e $CHARM_DIR/nova-compute-common ]] ; then
1216- . $CHARM_DIR/nova-compute-common
1217+if [[ -e $HOOKS_DIR/nova-compute-common ]] ; then
1218+ . $HOOKS_DIR/nova-compute-common
1219 else
1220- juju-log "ERROR: Could not load nova-compute-common from $CHARM_DIR"
1221+ juju-log "ERROR: Could not load nova-compute-common from $HOOKS_DIR"
1222 fi
1223
1224 function install_hook {
1225@@ -40,6 +40,11 @@
1226 do_openstack_upgrade "$install_src" $PACKAGES
1227 fi
1228
1229+ # set this here until its fixed in grizzly packaging. (adam_g)
1230+ [[ "$cur" == "grizzly" ]] &&
1231+ set_or_update "compute_driver" "libvirt.LibvirtDriver"
1232+
1233+ configure_libvirt
1234 set_config_flags
1235 service_ctl all restart
1236 }
1237@@ -67,6 +72,18 @@
1238 exit 0
1239 fi
1240
1241+ # if the rabbitmq service is clustered among nodes with hacluster,
1242+ # point to its vip instead of its private-address.
1243+ local clustered=$(relation-get clustered)
1244+ if [[ -n "$clustered" ]] ; then
1245+ juju-log "$CHARM - amqp_changed: Configuring for "\
1246+ "access to haclustered rabbitmq service."
1247+ local vip=$(relation-get vip)
1248+ [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
1249+ && exit 0
1250+ rabbit_host="$vip"
1251+ fi
1252+
1253 local rabbit_user=$(config-get rabbit-user)
1254 local rabbit_vhost=$(config-get rabbit-vhost)
1255 juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \
1256@@ -101,7 +118,7 @@
1257 }
1258
1259 function db_changed {
1260- local db_host=`relation-get private-address`
1261+ local db_host=`relation-get db_host`
1262 local db_password=`relation-get nova_password`
1263
1264 if [[ -z $db_host ]] || [[ -z $db_password ]] ; then
1265@@ -125,15 +142,33 @@
1266 }
1267
1268 function image-service_changed {
1269- GLANCE_API_SERVER=`relation-get glance-api-server`
1270- if [[ -z $GLANCE_API_SERVER ]] ; then
1271- echo "image-service_changed: GLANCE_API_SERVER not yet set. Exit 0 and retry"
1272+ local api_server=`relation-get glance-api-server`
1273+ if [[ -z $api_server ]] ; then
1274+ echo "image-service_changed: api_server not yet set. Exit 0 and retry"
1275 exit 0
1276 fi
1277- set_or_update glance_api_servers $GLANCE_API_SERVER
1278+
1279+ if [[ "$(get_os_codename_package nova-common)" == "essex" ]] ; then
1280+ # essex needs glance_api_servers urls stripped of protocol.
1281+ api_server="$(echo $api_server | awk '{gsub(/http:\/\/|https:\/\//,"")}1')"
1282+ fi
1283+
1284+ set_or_update glance_api_servers $api_server
1285 service_ctl all restart
1286 }
1287
1288+function compute_joined {
1289+ migration_enabled || return 0
1290+ local relid="$1"
1291+ [[ -n "$relid" ]] && relid="-r $relid"
1292+ migration_auth="$(config-get migration-auth-type)"
1293+ case "$migration_auth" in
1294+ "none"|"None") return 0 ;;
1295+ "ssh") relation-set $relid ssh_public_key="$(cat /root/.ssh/id_rsa.pub)" ;;
1296+ esac
1297+ relation-set $relid migration_auth_type="$migration_auth"
1298+}
1299+
1300 function compute_changed {
1301 # nova-c-c will inform us of the configured network manager. nova-compute
1302 # needs to configure itself accordingly.
1303@@ -154,21 +189,29 @@
1304 done
1305 # Rabbit MQ relation may also already be in place
1306 # shared vhost with nova so just grab settings and
1307- # configure
1308+ # configure. need to be sure to use VIP if clustered.
1309+ local rabbit_clustered="" rabbit_vip="" rabbit_host="" rabbit_password=""
1310 r_ids="$(relation-ids amqp)"
1311 for id in $r_ids ; do
1312 for unit in $(relation-list -r $id) ; do
1313- local rabbit_host=$(relation-get -r $id private-address $unit)
1314- local rabbit_password=$(relation-get -r $id password $unit)
1315- if [[ -n $rabbit_host ]] && \
1316- [[ -n $rabbit_password ]]; then
1317- set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
1318- set_or_update rabbit_userid "$(config-get rabbit-user)" "$QUANTUM_CONF"
1319- set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
1320- set_or_update rabbit_virtual_host "$(config-get rabbit-vhost)" "$QUANTUM_CONF"
1321- fi
1322- done
1323+ [[ -z "$rabbit_clustered" ]] &&
1324+ rabbit_clustered=$(relation-get -r $id clustered $unit)
1325+ [[ -z "$rabbit_vip" ]] && rabbit_vip=$(relation-get -r $id vip $unit)
1326+ [[ -z "$rabbit_password" ]] &&
1327+ rabbit_password=$(relation-get -r $id password $unit)
1328+ rabbit_host=$(relation-get -r $id private-address $unit)
1329+ done
1330 done
1331+ if [[ -n "$rabbit_clustered" ]] ; then
1332+ rabbit_host="$rabbit_vip"
1333+ fi
1334+ if [[ -n $rabbit_host ]] && \
1335+ [[ -n $rabbit_password ]]; then
1336+ set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
1337+ set_or_update rabbit_userid "$(config-get rabbit-user)" "$QUANTUM_CONF"
1338+ set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
1339+ set_or_update rabbit_virtual_host "$(config-get rabbit-vhost)" "$QUANTUM_CONF"
1340+ fi
1341 else
1342 configure_network_manager "$network_manager"
1343 fi
1344@@ -178,6 +221,31 @@
1345 volume_service=`relation-get volume_service`
1346 [[ -n "$volume_service" ]] && configure_volume_service "$volume_service"
1347
1348+ if migration_enabled ; then
1349+ case "$(config-get migration-auth-type)" in
1350+ "ssh")
1351+ local known_hosts="$(relation-get known_hosts)"
1352+ local authorized_keys="$(relation-get authorized_keys)"
1353+ if [[ -n "$known_hosts" ]] &&
1354+ [[ -n "$authorized_keys" ]] ; then
1355+ juju-log "$CHARM: Saving new known_hosts+authorized_keys file."
1356+ echo "$known_hosts" | base64 -di >/root/.ssh/known_hosts
1357+ echo "$authorized_keys" | base64 -di >/root/.ssh/authorized_keys
1358+ fi
1359+ ;;
1360+ esac
1361+ fi
1362+
1363+ # If Keystone is configured to manage SSL certs, nova-compute needs a copy
1364+ # of its CA installed.
1365+ local ca_cert="$(relation-get ca_cert)"
1366+ if [[ -n "$ca_cert" ]] ; then
1367+ juju-log "Installing Keystone CA certificate."
1368+ ca_cert="$(echo $ca_cert | base64 -di)"
1369+ echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
1370+ update-ca-certificates
1371+ fi
1372+
1373 # restart on all changed events. nova-c-c may send out a uuid to trigger
1374 # remote restarts of services here (after db migrations, for instance)
1375 service_ctl all restart
1376@@ -218,8 +286,12 @@
1377 EOF
1378
1379 if [ ! -f /etc/ceph/secret.xml ]; then
1380+ # This is just a label and it must be consistent across
1381+ # nova-compute nodes to support live migration.
1382+ UUID="514c9fca-8cbe-11e2-9c52-3bc8c7819472"
1383 cat > /etc/ceph/secret.xml << EOF
1384 <secret ephemeral='no' private='no'>
1385+ <uuid>$UUID</uuid>
1386 <usage type='ceph'>
1387 <name>client.$SERVICE_NAME secret</name>
1388 </usage>
1389@@ -228,10 +300,10 @@
1390 # Create secret for libvirt usage
1391 # note that this does limit ceph usage to
1392 # KVM only at this point in time.
1393- uuid=$(virsh secret-define --file /etc/ceph/secret.xml | cut -d " " -f 2)
1394- virsh secret-set-value --secret $uuid --base64 $KEY
1395+ virsh secret-define --file /etc/ceph/secret.xml
1396+ virsh secret-set-value --secret $UUID --base64 $KEY
1397 set_or_update rbd_user $SERVICE_NAME
1398- set_or_update rbd_secret_uuid $uuid
1399+ set_or_update rbd_secret_uuid $UUID
1400 set_or_update rbd_pool nova
1401 service_ctl all restart
1402 fi
1403@@ -252,6 +324,6 @@
1404 "identity-service-relation-changed") exit 0 ;;
1405 "ceph-relation-joined") ceph_joined;;
1406 "ceph-relation-changed") ceph_changed;;
1407- "cloud-compute-relation-joined" ) exit 0 ;;
1408+ "cloud-compute-relation-joined" ) compute_joined ;;
1409 "cloud-compute-relation-changed") compute_changed ;;
1410 esac
1411
1412=== modified file 'metadata.yaml'
1413--- metadata.yaml 2013-03-01 22:10:38 +0000
1414+++ metadata.yaml 2013-05-29 18:05:36 +0000
1415@@ -20,3 +20,6 @@
1416 interface: glance
1417 ceph:
1418 interface: ceph-client
1419+peers:
1420+ compute-peer:
1421+ interface: nova
1422
1423=== modified file 'revision'
1424--- revision 2013-03-05 17:34:40 +0000
1425+++ revision 2013-05-29 18:05:36 +0000
1426@@ -1,1 +1,1 @@
1427-81
1428+91
