Merge lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix into lp:~openstack-charmers-archive/charms/trusty/neutron-api-odl/next
- Trusty Tahr (14.04)
- packagefix
- Merge into next
Status: | Merged |
---|---|
Merged at revision: | 12 |
Proposed branch: | lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/neutron-api-odl/next |
Diff against target: |
2291 lines (+996/-317) 20 files modified
hooks/charmhelpers/contrib/network/ip.py (+21/-19) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+10/-4) hooks/charmhelpers/contrib/openstack/context.py (+48/-10) hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+7/-5) hooks/charmhelpers/contrib/openstack/neutron.py (+20/-8) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+19/-11) hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+11/-0) hooks/charmhelpers/contrib/openstack/utils.py (+145/-67) hooks/charmhelpers/contrib/python/packages.py (+35/-11) hooks/charmhelpers/contrib/storage/linux/ceph.py (+441/-59) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/core/hookenv.py (+41/-7) hooks/charmhelpers/core/host.py (+103/-43) hooks/charmhelpers/core/services/helpers.py (+11/-5) hooks/charmhelpers/core/templating.py (+13/-7) hooks/charmhelpers/fetch/__init__.py (+9/-1) hooks/charmhelpers/fetch/archiveurl.py (+1/-1) hooks/charmhelpers/fetch/bzrurl.py (+22/-32) hooks/charmhelpers/fetch/giturl.py (+20/-23) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-4) |
To merge this branch: | bzr merge lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
David Ames (community) | Approve | ||
James Page | Pending | ||
Review via email:
|
Commit message
Description of the change
Sync charm helpers, as without that python-
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
uosci-testing-bot (uosci-testing-bot) wrote : | # |
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_unit_test #305 neutron-
UNIT OK: passed
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
uosci-testing-bot (uosci-testing-bot) wrote : | # |
charm_amulet_test #155 neutron-
AMULET FAIL: amulet-test failed
AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.
Full amulet test output: http://
Build: http://
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
David Ames (thedac) wrote : | # |
This is a simple charm helpers sync to fix liberty version string handling, as neutron-common is now at version 7.0.1-0ubuntu1~
'
- ('7.0.0', 'liberty'),
+ ('7.0', 'liberty'),
+ ('8.0', 'mitaka'),
Approved
Preview Diff
1 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
2 | --- hooks/charmhelpers/contrib/network/ip.py 2015-11-03 12:29:06 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/network/ip.py 2016-02-12 19:36:45 +0000 | |||
4 | @@ -53,7 +53,7 @@ | |||
5 | 53 | 53 | ||
6 | 54 | 54 | ||
7 | 55 | def no_ip_found_error_out(network): | 55 | def no_ip_found_error_out(network): |
9 | 56 | errmsg = ("No IP address found in network: %s" % network) | 56 | errmsg = ("No IP address found in network(s): %s" % network) |
10 | 57 | raise ValueError(errmsg) | 57 | raise ValueError(errmsg) |
11 | 58 | 58 | ||
12 | 59 | 59 | ||
13 | @@ -61,7 +61,7 @@ | |||
14 | 61 | """Get an IPv4 or IPv6 address within the network from the host. | 61 | """Get an IPv4 or IPv6 address within the network from the host. |
15 | 62 | 62 | ||
16 | 63 | :param network (str): CIDR presentation format. For example, | 63 | :param network (str): CIDR presentation format. For example, |
18 | 64 | '192.168.1.0/24'. | 64 | '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
19 | 65 | :param fallback (str): If no address is found, return fallback. | 65 | :param fallback (str): If no address is found, return fallback. |
20 | 66 | :param fatal (boolean): If no address is found, fallback is not | 66 | :param fatal (boolean): If no address is found, fallback is not |
21 | 67 | set and fatal is True then exit(1). | 67 | set and fatal is True then exit(1). |
22 | @@ -75,24 +75,26 @@ | |||
23 | 75 | else: | 75 | else: |
24 | 76 | return None | 76 | return None |
25 | 77 | 77 | ||
36 | 78 | _validate_cidr(network) | 78 | networks = network.split() or [network] |
37 | 79 | network = netaddr.IPNetwork(network) | 79 | for network in networks: |
38 | 80 | for iface in netifaces.interfaces(): | 80 | _validate_cidr(network) |
39 | 81 | addresses = netifaces.ifaddresses(iface) | 81 | network = netaddr.IPNetwork(network) |
40 | 82 | if network.version == 4 and netifaces.AF_INET in addresses: | 82 | for iface in netifaces.interfaces(): |
41 | 83 | addr = addresses[netifaces.AF_INET][0]['addr'] | 83 | addresses = netifaces.ifaddresses(iface) |
42 | 84 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | 84 | if network.version == 4 and netifaces.AF_INET in addresses: |
43 | 85 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 85 | addr = addresses[netifaces.AF_INET][0]['addr'] |
44 | 86 | if cidr in network: | 86 | netmask = addresses[netifaces.AF_INET][0]['netmask'] |
45 | 87 | return str(cidr.ip) | 87 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
46 | 88 | if cidr in network: | ||
47 | 89 | return str(cidr.ip) | ||
48 | 88 | 90 | ||
56 | 89 | if network.version == 6 and netifaces.AF_INET6 in addresses: | 91 | if network.version == 6 and netifaces.AF_INET6 in addresses: |
57 | 90 | for addr in addresses[netifaces.AF_INET6]: | 92 | for addr in addresses[netifaces.AF_INET6]: |
58 | 91 | if not addr['addr'].startswith('fe80'): | 93 | if not addr['addr'].startswith('fe80'): |
59 | 92 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | 94 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
60 | 93 | addr['netmask'])) | 95 | addr['netmask'])) |
61 | 94 | if cidr in network: | 96 | if cidr in network: |
62 | 95 | return str(cidr.ip) | 97 | return str(cidr.ip) |
63 | 96 | 98 | ||
64 | 97 | if fallback is not None: | 99 | if fallback is not None: |
65 | 98 | return fallback | 100 | return fallback |
66 | 99 | 101 | ||
67 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
68 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-03 12:29:06 +0000 | |||
69 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-12 19:36:45 +0000 | |||
70 | @@ -121,10 +121,12 @@ | |||
71 | 121 | 121 | ||
72 | 122 | # Charms which should use the source config option | 122 | # Charms which should use the source config option |
73 | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
75 | 124 | 'ceph-osd', 'ceph-radosgw'] | 124 | 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] |
76 | 125 | 125 | ||
77 | 126 | # Charms which can not use openstack-origin, ie. many subordinates | 126 | # Charms which can not use openstack-origin, ie. many subordinates |
79 | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
80 | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', | ||
81 | 129 | 'cinder-backup'] | ||
82 | 128 | 130 | ||
83 | 129 | if self.openstack: | 131 | if self.openstack: |
84 | 130 | for svc in services: | 132 | for svc in services: |
85 | @@ -224,7 +226,8 @@ | |||
86 | 224 | self.precise_havana, self.precise_icehouse, | 226 | self.precise_havana, self.precise_icehouse, |
87 | 225 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 227 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
88 | 226 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | 228 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
90 | 227 | self.wily_liberty) = range(12) | 229 | self.wily_liberty, self.trusty_mitaka, |
91 | 230 | self.xenial_mitaka) = range(14) | ||
92 | 228 | 231 | ||
93 | 229 | releases = { | 232 | releases = { |
94 | 230 | ('precise', None): self.precise_essex, | 233 | ('precise', None): self.precise_essex, |
95 | @@ -236,9 +239,11 @@ | |||
96 | 236 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 239 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
97 | 237 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 240 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
98 | 238 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | 241 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
99 | 242 | ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, | ||
100 | 239 | ('utopic', None): self.utopic_juno, | 243 | ('utopic', None): self.utopic_juno, |
101 | 240 | ('vivid', None): self.vivid_kilo, | 244 | ('vivid', None): self.vivid_kilo, |
103 | 241 | ('wily', None): self.wily_liberty} | 245 | ('wily', None): self.wily_liberty, |
104 | 246 | ('xenial', None): self.xenial_mitaka} | ||
105 | 242 | return releases[(self.series, self.openstack)] | 247 | return releases[(self.series, self.openstack)] |
106 | 243 | 248 | ||
107 | 244 | def _get_openstack_release_string(self): | 249 | def _get_openstack_release_string(self): |
108 | @@ -255,6 +260,7 @@ | |||
109 | 255 | ('utopic', 'juno'), | 260 | ('utopic', 'juno'), |
110 | 256 | ('vivid', 'kilo'), | 261 | ('vivid', 'kilo'), |
111 | 257 | ('wily', 'liberty'), | 262 | ('wily', 'liberty'), |
112 | 263 | ('xenial', 'mitaka'), | ||
113 | 258 | ]) | 264 | ]) |
114 | 259 | if self.openstack: | 265 | if self.openstack: |
115 | 260 | os_origin = self.openstack.split(':')[1] | 266 | os_origin = self.openstack.split(':')[1] |
116 | 261 | 267 | ||
117 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
118 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-11-03 12:29:06 +0000 | |||
119 | +++ hooks/charmhelpers/contrib/openstack/context.py 2016-02-12 19:36:45 +0000 | |||
120 | @@ -57,6 +57,7 @@ | |||
121 | 57 | get_nic_hwaddr, | 57 | get_nic_hwaddr, |
122 | 58 | mkdir, | 58 | mkdir, |
123 | 59 | write_file, | 59 | write_file, |
124 | 60 | pwgen, | ||
125 | 60 | ) | 61 | ) |
126 | 61 | from charmhelpers.contrib.hahelpers.cluster import ( | 62 | from charmhelpers.contrib.hahelpers.cluster import ( |
127 | 62 | determine_apache_port, | 63 | determine_apache_port, |
128 | @@ -87,6 +88,14 @@ | |||
129 | 87 | is_bridge_member, | 88 | is_bridge_member, |
130 | 88 | ) | 89 | ) |
131 | 89 | from charmhelpers.contrib.openstack.utils import get_host_ip | 90 | from charmhelpers.contrib.openstack.utils import get_host_ip |
132 | 91 | from charmhelpers.core.unitdata import kv | ||
133 | 92 | |||
134 | 93 | try: | ||
135 | 94 | import psutil | ||
136 | 95 | except ImportError: | ||
137 | 96 | apt_install('python-psutil', fatal=True) | ||
138 | 97 | import psutil | ||
139 | 98 | |||
140 | 90 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 99 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
141 | 91 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | 100 | ADDRESS_TYPES = ['admin', 'internal', 'public'] |
142 | 92 | 101 | ||
143 | @@ -401,6 +410,7 @@ | |||
144 | 401 | auth_host = format_ipv6_addr(auth_host) or auth_host | 410 | auth_host = format_ipv6_addr(auth_host) or auth_host |
145 | 402 | svc_protocol = rdata.get('service_protocol') or 'http' | 411 | svc_protocol = rdata.get('service_protocol') or 'http' |
146 | 403 | auth_protocol = rdata.get('auth_protocol') or 'http' | 412 | auth_protocol = rdata.get('auth_protocol') or 'http' |
147 | 413 | api_version = rdata.get('api_version') or '2.0' | ||
148 | 404 | ctxt.update({'service_port': rdata.get('service_port'), | 414 | ctxt.update({'service_port': rdata.get('service_port'), |
149 | 405 | 'service_host': serv_host, | 415 | 'service_host': serv_host, |
150 | 406 | 'auth_host': auth_host, | 416 | 'auth_host': auth_host, |
151 | @@ -409,7 +419,8 @@ | |||
152 | 409 | 'admin_user': rdata.get('service_username'), | 419 | 'admin_user': rdata.get('service_username'), |
153 | 410 | 'admin_password': rdata.get('service_password'), | 420 | 'admin_password': rdata.get('service_password'), |
154 | 411 | 'service_protocol': svc_protocol, | 421 | 'service_protocol': svc_protocol, |
156 | 412 | 'auth_protocol': auth_protocol}) | 422 | 'auth_protocol': auth_protocol, |
157 | 423 | 'api_version': api_version}) | ||
158 | 413 | 424 | ||
159 | 414 | if self.context_complete(ctxt): | 425 | if self.context_complete(ctxt): |
160 | 415 | # NOTE(jamespage) this is required for >= icehouse | 426 | # NOTE(jamespage) this is required for >= icehouse |
161 | @@ -626,15 +637,28 @@ | |||
162 | 626 | if config('haproxy-client-timeout'): | 637 | if config('haproxy-client-timeout'): |
163 | 627 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | 638 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
164 | 628 | 639 | ||
165 | 640 | if config('haproxy-queue-timeout'): | ||
166 | 641 | ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') | ||
167 | 642 | |||
168 | 643 | if config('haproxy-connect-timeout'): | ||
169 | 644 | ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') | ||
170 | 645 | |||
171 | 629 | if config('prefer-ipv6'): | 646 | if config('prefer-ipv6'): |
172 | 630 | ctxt['ipv6'] = True | 647 | ctxt['ipv6'] = True |
173 | 631 | ctxt['local_host'] = 'ip6-localhost' | 648 | ctxt['local_host'] = 'ip6-localhost' |
174 | 632 | ctxt['haproxy_host'] = '::' | 649 | ctxt['haproxy_host'] = '::' |
175 | 633 | ctxt['stat_port'] = ':::8888' | ||
176 | 634 | else: | 650 | else: |
177 | 635 | ctxt['local_host'] = '127.0.0.1' | 651 | ctxt['local_host'] = '127.0.0.1' |
178 | 636 | ctxt['haproxy_host'] = '0.0.0.0' | 652 | ctxt['haproxy_host'] = '0.0.0.0' |
180 | 637 | ctxt['stat_port'] = ':8888' | 653 | |
181 | 654 | ctxt['stat_port'] = '8888' | ||
182 | 655 | |||
183 | 656 | db = kv() | ||
184 | 657 | ctxt['stat_password'] = db.get('stat-password') | ||
185 | 658 | if not ctxt['stat_password']: | ||
186 | 659 | ctxt['stat_password'] = db.set('stat-password', | ||
187 | 660 | pwgen(32)) | ||
188 | 661 | db.flush() | ||
189 | 638 | 662 | ||
190 | 639 | for frontend in cluster_hosts: | 663 | for frontend in cluster_hosts: |
191 | 640 | if (len(cluster_hosts[frontend]['backends']) > 1 or | 664 | if (len(cluster_hosts[frontend]['backends']) > 1 or |
192 | @@ -1088,6 +1112,20 @@ | |||
193 | 1088 | config_flags_parser(config_flags)} | 1112 | config_flags_parser(config_flags)} |
194 | 1089 | 1113 | ||
195 | 1090 | 1114 | ||
196 | 1115 | class LibvirtConfigFlagsContext(OSContextGenerator): | ||
197 | 1116 | """ | ||
198 | 1117 | This context provides support for extending | ||
199 | 1118 | the libvirt section through user-defined flags. | ||
200 | 1119 | """ | ||
201 | 1120 | def __call__(self): | ||
202 | 1121 | ctxt = {} | ||
203 | 1122 | libvirt_flags = config('libvirt-flags') | ||
204 | 1123 | if libvirt_flags: | ||
205 | 1124 | ctxt['libvirt_flags'] = config_flags_parser( | ||
206 | 1125 | libvirt_flags) | ||
207 | 1126 | return ctxt | ||
208 | 1127 | |||
209 | 1128 | |||
210 | 1091 | class SubordinateConfigContext(OSContextGenerator): | 1129 | class SubordinateConfigContext(OSContextGenerator): |
211 | 1092 | 1130 | ||
212 | 1093 | """ | 1131 | """ |
213 | @@ -1228,13 +1266,11 @@ | |||
214 | 1228 | 1266 | ||
215 | 1229 | @property | 1267 | @property |
216 | 1230 | def num_cpus(self): | 1268 | def num_cpus(self): |
224 | 1231 | try: | 1269 | # NOTE: use cpu_count if present (16.04 support) |
225 | 1232 | from psutil import NUM_CPUS | 1270 | if hasattr(psutil, 'cpu_count'): |
226 | 1233 | except ImportError: | 1271 | return psutil.cpu_count() |
227 | 1234 | apt_install('python-psutil', fatal=True) | 1272 | else: |
228 | 1235 | from psutil import NUM_CPUS | 1273 | return psutil.NUM_CPUS |
222 | 1236 | |||
223 | 1237 | return NUM_CPUS | ||
229 | 1238 | 1274 | ||
230 | 1239 | def __call__(self): | 1275 | def __call__(self): |
231 | 1240 | multiplier = config('worker-multiplier') or 0 | 1276 | multiplier = config('worker-multiplier') or 0 |
232 | @@ -1437,6 +1473,8 @@ | |||
233 | 1437 | rdata.get('service_protocol') or 'http', | 1473 | rdata.get('service_protocol') or 'http', |
234 | 1438 | 'auth_protocol': | 1474 | 'auth_protocol': |
235 | 1439 | rdata.get('auth_protocol') or 'http', | 1475 | rdata.get('auth_protocol') or 'http', |
236 | 1476 | 'api_version': | ||
237 | 1477 | rdata.get('api_version') or '2.0', | ||
238 | 1440 | } | 1478 | } |
239 | 1441 | if self.context_complete(ctxt): | 1479 | if self.context_complete(ctxt): |
240 | 1442 | return ctxt | 1480 | return ctxt |
241 | 1443 | 1481 | ||
242 | === modified file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh' | |||
243 | --- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-06-24 12:22:08 +0000 | |||
244 | +++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2016-02-12 19:36:45 +0000 | |||
245 | @@ -9,15 +9,17 @@ | |||
246 | 9 | CRITICAL=0 | 9 | CRITICAL=0 |
247 | 10 | NOTACTIVE='' | 10 | NOTACTIVE='' |
248 | 11 | LOGFILE=/var/log/nagios/check_haproxy.log | 11 | LOGFILE=/var/log/nagios/check_haproxy.log |
250 | 12 | AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') | 12 | AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') |
251 | 13 | 13 | ||
253 | 14 | for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); | 14 | typeset -i N_INSTANCES=0 |
254 | 15 | for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) | ||
255 | 15 | do | 16 | do |
257 | 16 | output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') | 17 | N_INSTANCES=N_INSTANCES+1 |
258 | 18 | output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') | ||
259 | 17 | if [ $? != 0 ]; then | 19 | if [ $? != 0 ]; then |
260 | 18 | date >> $LOGFILE | 20 | date >> $LOGFILE |
261 | 19 | echo $output >> $LOGFILE | 21 | echo $output >> $LOGFILE |
263 | 20 | /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 | 22 | /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 |
264 | 21 | CRITICAL=1 | 23 | CRITICAL=1 |
265 | 22 | NOTACTIVE="${NOTACTIVE} $appserver" | 24 | NOTACTIVE="${NOTACTIVE} $appserver" |
266 | 23 | fi | 25 | fi |
267 | @@ -28,5 +30,5 @@ | |||
268 | 28 | exit 2 | 30 | exit 2 |
269 | 29 | fi | 31 | fi |
270 | 30 | 32 | ||
272 | 31 | echo "OK: All haproxy instances looking good" | 33 | echo "OK: All haproxy instances ($N_INSTANCES) looking good" |
273 | 32 | exit 0 | 34 | exit 0 |
274 | 33 | 35 | ||
275 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
276 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-11-03 12:29:06 +0000 | |||
277 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-02-12 19:36:45 +0000 | |||
278 | @@ -50,7 +50,7 @@ | |||
279 | 50 | if kernel_version() >= (3, 13): | 50 | if kernel_version() >= (3, 13): |
280 | 51 | return [] | 51 | return [] |
281 | 52 | else: | 52 | else: |
283 | 53 | return ['openvswitch-datapath-dkms'] | 53 | return [headers_package(), 'openvswitch-datapath-dkms'] |
284 | 54 | 54 | ||
285 | 55 | 55 | ||
286 | 56 | # legacy | 56 | # legacy |
287 | @@ -70,7 +70,7 @@ | |||
288 | 70 | relation_prefix='neutron', | 70 | relation_prefix='neutron', |
289 | 71 | ssl_dir=QUANTUM_CONF_DIR)], | 71 | ssl_dir=QUANTUM_CONF_DIR)], |
290 | 72 | 'services': ['quantum-plugin-openvswitch-agent'], | 72 | 'services': ['quantum-plugin-openvswitch-agent'], |
292 | 73 | 'packages': [[headers_package()] + determine_dkms_package(), | 73 | 'packages': [determine_dkms_package(), |
293 | 74 | ['quantum-plugin-openvswitch-agent']], | 74 | ['quantum-plugin-openvswitch-agent']], |
294 | 75 | 'server_packages': ['quantum-server', | 75 | 'server_packages': ['quantum-server', |
295 | 76 | 'quantum-plugin-openvswitch'], | 76 | 'quantum-plugin-openvswitch'], |
296 | @@ -111,7 +111,7 @@ | |||
297 | 111 | relation_prefix='neutron', | 111 | relation_prefix='neutron', |
298 | 112 | ssl_dir=NEUTRON_CONF_DIR)], | 112 | ssl_dir=NEUTRON_CONF_DIR)], |
299 | 113 | 'services': ['neutron-plugin-openvswitch-agent'], | 113 | 'services': ['neutron-plugin-openvswitch-agent'], |
301 | 114 | 'packages': [[headers_package()] + determine_dkms_package(), | 114 | 'packages': [determine_dkms_package(), |
302 | 115 | ['neutron-plugin-openvswitch-agent']], | 115 | ['neutron-plugin-openvswitch-agent']], |
303 | 116 | 'server_packages': ['neutron-server', | 116 | 'server_packages': ['neutron-server', |
304 | 117 | 'neutron-plugin-openvswitch'], | 117 | 'neutron-plugin-openvswitch'], |
305 | @@ -155,7 +155,7 @@ | |||
306 | 155 | relation_prefix='neutron', | 155 | relation_prefix='neutron', |
307 | 156 | ssl_dir=NEUTRON_CONF_DIR)], | 156 | ssl_dir=NEUTRON_CONF_DIR)], |
308 | 157 | 'services': [], | 157 | 'services': [], |
310 | 158 | 'packages': [[headers_package()] + determine_dkms_package(), | 158 | 'packages': [determine_dkms_package(), |
311 | 159 | ['neutron-plugin-cisco']], | 159 | ['neutron-plugin-cisco']], |
312 | 160 | 'server_packages': ['neutron-server', | 160 | 'server_packages': ['neutron-server', |
313 | 161 | 'neutron-plugin-cisco'], | 161 | 'neutron-plugin-cisco'], |
314 | @@ -174,7 +174,7 @@ | |||
315 | 174 | 'neutron-dhcp-agent', | 174 | 'neutron-dhcp-agent', |
316 | 175 | 'nova-api-metadata', | 175 | 'nova-api-metadata', |
317 | 176 | 'etcd'], | 176 | 'etcd'], |
319 | 177 | 'packages': [[headers_package()] + determine_dkms_package(), | 177 | 'packages': [determine_dkms_package(), |
320 | 178 | ['calico-compute', | 178 | ['calico-compute', |
321 | 179 | 'bird', | 179 | 'bird', |
322 | 180 | 'neutron-dhcp-agent', | 180 | 'neutron-dhcp-agent', |
323 | @@ -204,8 +204,8 @@ | |||
324 | 204 | database=config('database'), | 204 | database=config('database'), |
325 | 205 | ssl_dir=NEUTRON_CONF_DIR)], | 205 | ssl_dir=NEUTRON_CONF_DIR)], |
326 | 206 | 'services': [], | 206 | 'services': [], |
329 | 207 | 'packages': [['plumgrid-lxc'], | 207 | 'packages': ['plumgrid-lxc', |
330 | 208 | ['iovisor-dkms']], | 208 | 'iovisor-dkms'], |
331 | 209 | 'server_packages': ['neutron-server', | 209 | 'server_packages': ['neutron-server', |
332 | 210 | 'neutron-plugin-plumgrid'], | 210 | 'neutron-plugin-plumgrid'], |
333 | 211 | 'server_services': ['neutron-server'] | 211 | 'server_services': ['neutron-server'] |
334 | @@ -219,7 +219,7 @@ | |||
335 | 219 | relation_prefix='neutron', | 219 | relation_prefix='neutron', |
336 | 220 | ssl_dir=NEUTRON_CONF_DIR)], | 220 | ssl_dir=NEUTRON_CONF_DIR)], |
337 | 221 | 'services': [], | 221 | 'services': [], |
339 | 222 | 'packages': [[headers_package()] + determine_dkms_package()], | 222 | 'packages': [determine_dkms_package()], |
340 | 223 | 'server_packages': ['neutron-server', | 223 | 'server_packages': ['neutron-server', |
341 | 224 | 'python-neutron-plugin-midonet'], | 224 | 'python-neutron-plugin-midonet'], |
342 | 225 | 'server_services': ['neutron-server'] | 225 | 'server_services': ['neutron-server'] |
343 | @@ -233,6 +233,18 @@ | |||
344 | 233 | 'neutron-plugin-ml2'] | 233 | 'neutron-plugin-ml2'] |
345 | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards |
346 | 235 | plugins['nvp'] = plugins['nsx'] | 235 | plugins['nvp'] = plugins['nsx'] |
347 | 236 | if release >= 'kilo': | ||
348 | 237 | plugins['midonet']['driver'] = ( | ||
349 | 238 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') | ||
350 | 239 | if release >= 'liberty': | ||
351 | 240 | midonet_origin = config('midonet-origin') | ||
352 | 241 | if midonet_origin is not None and midonet_origin[4:5] == '1': | ||
353 | 242 | plugins['midonet']['driver'] = ( | ||
354 | 243 | 'midonet.neutron.plugin_v1.MidonetPluginV2') | ||
355 | 244 | plugins['midonet']['server_packages'].remove( | ||
356 | 245 | 'python-neutron-plugin-midonet') | ||
357 | 246 | plugins['midonet']['server_packages'].append( | ||
358 | 247 | 'python-networking-midonet') | ||
359 | 236 | return plugins | 248 | return plugins |
360 | 237 | 249 | ||
361 | 238 | 250 | ||
362 | 239 | 251 | ||
363 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
364 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-06-24 12:22:08 +0000 | |||
365 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-02-12 19:36:45 +0000 | |||
366 | @@ -12,27 +12,35 @@ | |||
367 | 12 | option tcplog | 12 | option tcplog |
368 | 13 | option dontlognull | 13 | option dontlognull |
369 | 14 | retries 3 | 14 | retries 3 |
373 | 15 | timeout queue 1000 | 15 | {%- if haproxy_queue_timeout %} |
374 | 16 | timeout connect 1000 | 16 | timeout queue {{ haproxy_queue_timeout }} |
375 | 17 | {% if haproxy_client_timeout -%} | 17 | {%- else %} |
376 | 18 | timeout queue 5000 | ||
377 | 19 | {%- endif %} | ||
378 | 20 | {%- if haproxy_connect_timeout %} | ||
379 | 21 | timeout connect {{ haproxy_connect_timeout }} | ||
380 | 22 | {%- else %} | ||
381 | 23 | timeout connect 5000 | ||
382 | 24 | {%- endif %} | ||
383 | 25 | {%- if haproxy_client_timeout %} | ||
384 | 18 | timeout client {{ haproxy_client_timeout }} | 26 | timeout client {{ haproxy_client_timeout }} |
386 | 19 | {% else -%} | 27 | {%- else %} |
387 | 20 | timeout client 30000 | 28 | timeout client 30000 |
391 | 21 | {% endif -%} | 29 | {%- endif %} |
392 | 22 | 30 | {%- if haproxy_server_timeout %} | |
390 | 23 | {% if haproxy_server_timeout -%} | ||
393 | 24 | timeout server {{ haproxy_server_timeout }} | 31 | timeout server {{ haproxy_server_timeout }} |
395 | 25 | {% else -%} | 32 | {%- else %} |
396 | 26 | timeout server 30000 | 33 | timeout server 30000 |
398 | 27 | {% endif -%} | 34 | {%- endif %} |
399 | 28 | 35 | ||
401 | 29 | listen stats {{ stat_port }} | 36 | listen stats |
402 | 37 | bind {{ local_host }}:{{ stat_port }} | ||
403 | 30 | mode http | 38 | mode http |
404 | 31 | stats enable | 39 | stats enable |
405 | 32 | stats hide-version | 40 | stats hide-version |
406 | 33 | stats realm Haproxy\ Statistics | 41 | stats realm Haproxy\ Statistics |
407 | 34 | stats uri / | 42 | stats uri / |
409 | 35 | stats auth admin:password | 43 | stats auth admin:{{ stat_password }} |
410 | 36 | 44 | ||
411 | 37 | {% if frontends -%} | 45 | {% if frontends -%} |
412 | 38 | {% for service, ports in service_ports.items() -%} | 46 | {% for service, ports in service_ports.items() -%} |
413 | 39 | 47 | ||
414 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken' | |||
415 | --- hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken 2015-06-24 12:22:08 +0000 | |||
416 | +++ hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken 2016-02-12 19:36:45 +0000 | |||
417 | @@ -1,4 +1,14 @@ | |||
418 | 1 | {% if auth_host -%} | 1 | {% if auth_host -%} |
419 | 2 | {% if api_version == '3' -%} | ||
420 | 3 | [keystone_authtoken] | ||
421 | 4 | auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }} | ||
422 | 5 | project_name = {{ admin_tenant_name }} | ||
423 | 6 | username = {{ admin_user }} | ||
424 | 7 | password = {{ admin_password }} | ||
425 | 8 | project_domain_name = default | ||
426 | 9 | user_domain_name = default | ||
427 | 10 | auth_plugin = password | ||
428 | 11 | {% else -%} | ||
429 | 2 | [keystone_authtoken] | 12 | [keystone_authtoken] |
430 | 3 | identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} | 13 | identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} |
431 | 4 | auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} | 14 | auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} |
432 | @@ -7,3 +17,4 @@ | |||
433 | 7 | admin_password = {{ admin_password }} | 17 | admin_password = {{ admin_password }} |
434 | 8 | signing_dir = {{ signing_dir }} | 18 | signing_dir = {{ signing_dir }} |
435 | 9 | {% endif -%} | 19 | {% endif -%} |
436 | 20 | {% endif -%} | ||
437 | 10 | 21 | ||
438 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
439 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-11-03 12:29:06 +0000 | |||
440 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2016-02-12 19:36:45 +0000 | |||
441 | @@ -25,6 +25,7 @@ | |||
442 | 25 | import re | 25 | import re |
443 | 26 | 26 | ||
444 | 27 | import six | 27 | import six |
445 | 28 | import tempfile | ||
446 | 28 | import traceback | 29 | import traceback |
447 | 29 | import uuid | 30 | import uuid |
448 | 30 | import yaml | 31 | import yaml |
449 | @@ -41,6 +42,7 @@ | |||
450 | 41 | config, | 42 | config, |
451 | 42 | log as juju_log, | 43 | log as juju_log, |
452 | 43 | charm_dir, | 44 | charm_dir, |
453 | 45 | DEBUG, | ||
454 | 44 | INFO, | 46 | INFO, |
455 | 45 | related_units, | 47 | related_units, |
456 | 46 | relation_ids, | 48 | relation_ids, |
457 | @@ -86,6 +88,7 @@ | |||
458 | 86 | ('utopic', 'juno'), | 88 | ('utopic', 'juno'), |
459 | 87 | ('vivid', 'kilo'), | 89 | ('vivid', 'kilo'), |
460 | 88 | ('wily', 'liberty'), | 90 | ('wily', 'liberty'), |
461 | 91 | ('xenial', 'mitaka'), | ||
462 | 89 | ]) | 92 | ]) |
463 | 90 | 93 | ||
464 | 91 | 94 | ||
465 | @@ -99,61 +102,70 @@ | |||
466 | 99 | ('2014.2', 'juno'), | 102 | ('2014.2', 'juno'), |
467 | 100 | ('2015.1', 'kilo'), | 103 | ('2015.1', 'kilo'), |
468 | 101 | ('2015.2', 'liberty'), | 104 | ('2015.2', 'liberty'), |
469 | 105 | ('2016.1', 'mitaka'), | ||
470 | 102 | ]) | 106 | ]) |
471 | 103 | 107 | ||
473 | 104 | # The ugly duckling | 108 | # The ugly duckling - must list releases oldest to newest |
474 | 105 | SWIFT_CODENAMES = OrderedDict([ | 109 | SWIFT_CODENAMES = OrderedDict([ |
496 | 106 | ('1.4.3', 'diablo'), | 110 | ('diablo', |
497 | 107 | ('1.4.8', 'essex'), | 111 | ['1.4.3']), |
498 | 108 | ('1.7.4', 'folsom'), | 112 | ('essex', |
499 | 109 | ('1.8.0', 'grizzly'), | 113 | ['1.4.8']), |
500 | 110 | ('1.7.7', 'grizzly'), | 114 | ('folsom', |
501 | 111 | ('1.7.6', 'grizzly'), | 115 | ['1.7.4']), |
502 | 112 | ('1.10.0', 'havana'), | 116 | ('grizzly', |
503 | 113 | ('1.9.1', 'havana'), | 117 | ['1.7.6', '1.7.7', '1.8.0']), |
504 | 114 | ('1.9.0', 'havana'), | 118 | ('havana', |
505 | 115 | ('1.13.1', 'icehouse'), | 119 | ['1.9.0', '1.9.1', '1.10.0']), |
506 | 116 | ('1.13.0', 'icehouse'), | 120 | ('icehouse', |
507 | 117 | ('1.12.0', 'icehouse'), | 121 | ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), |
508 | 118 | ('1.11.0', 'icehouse'), | 122 | ('juno', |
509 | 119 | ('2.0.0', 'juno'), | 123 | ['2.0.0', '2.1.0', '2.2.0']), |
510 | 120 | ('2.1.0', 'juno'), | 124 | ('kilo', |
511 | 121 | ('2.2.0', 'juno'), | 125 | ['2.2.1', '2.2.2']), |
512 | 122 | ('2.2.1', 'kilo'), | 126 | ('liberty', |
513 | 123 | ('2.2.2', 'kilo'), | 127 | ['2.3.0', '2.4.0', '2.5.0']), |
514 | 124 | ('2.3.0', 'liberty'), | 128 | ('mitaka', |
515 | 125 | ('2.4.0', 'liberty'), | 129 | ['2.5.0']), |
495 | 126 | ('2.5.0', 'liberty'), | ||
516 | 127 | ]) | 130 | ]) |
517 | 128 | 131 | ||
518 | 129 | # >= Liberty version->codename mapping | 132 | # >= Liberty version->codename mapping |
519 | 130 | PACKAGE_CODENAMES = { | 133 | PACKAGE_CODENAMES = { |
520 | 131 | 'nova-common': OrderedDict([ | 134 | 'nova-common': OrderedDict([ |
522 | 132 | ('12.0.0', 'liberty'), | 135 | ('12.0', 'liberty'), |
523 | 136 | ('13.0', 'mitaka'), | ||
524 | 133 | ]), | 137 | ]), |
525 | 134 | 'neutron-common': OrderedDict([ | 138 | 'neutron-common': OrderedDict([ |
527 | 135 | ('7.0.0', 'liberty'), | 139 | ('7.0', 'liberty'), |
528 | 140 | ('8.0', 'mitaka'), | ||
529 | 136 | ]), | 141 | ]), |
530 | 137 | 'cinder-common': OrderedDict([ | 142 | 'cinder-common': OrderedDict([ |
532 | 138 | ('7.0.0', 'liberty'), | 143 | ('7.0', 'liberty'), |
533 | 144 | ('8.0', 'mitaka'), | ||
534 | 139 | ]), | 145 | ]), |
535 | 140 | 'keystone': OrderedDict([ | 146 | 'keystone': OrderedDict([ |
537 | 141 | ('8.0.0', 'liberty'), | 147 | ('8.0', 'liberty'), |
538 | 148 | ('9.0', 'mitaka'), | ||
539 | 142 | ]), | 149 | ]), |
540 | 143 | 'horizon-common': OrderedDict([ | 150 | 'horizon-common': OrderedDict([ |
542 | 144 | ('8.0.0', 'liberty'), | 151 | ('8.0', 'liberty'), |
543 | 152 | ('9.0', 'mitaka'), | ||
544 | 145 | ]), | 153 | ]), |
545 | 146 | 'ceilometer-common': OrderedDict([ | 154 | 'ceilometer-common': OrderedDict([ |
547 | 147 | ('5.0.0', 'liberty'), | 155 | ('5.0', 'liberty'), |
548 | 156 | ('6.0', 'mitaka'), | ||
549 | 148 | ]), | 157 | ]), |
550 | 149 | 'heat-common': OrderedDict([ | 158 | 'heat-common': OrderedDict([ |
552 | 150 | ('5.0.0', 'liberty'), | 159 | ('5.0', 'liberty'), |
553 | 160 | ('6.0', 'mitaka'), | ||
554 | 151 | ]), | 161 | ]), |
555 | 152 | 'glance-common': OrderedDict([ | 162 | 'glance-common': OrderedDict([ |
557 | 153 | ('11.0.0', 'liberty'), | 163 | ('11.0', 'liberty'), |
558 | 164 | ('12.0', 'mitaka'), | ||
559 | 154 | ]), | 165 | ]), |
560 | 155 | 'openstack-dashboard': OrderedDict([ | 166 | 'openstack-dashboard': OrderedDict([ |
562 | 156 | ('8.0.0', 'liberty'), | 167 | ('8.0', 'liberty'), |
563 | 168 | ('9.0', 'mitaka'), | ||
564 | 157 | ]), | 169 | ]), |
565 | 158 | } | 170 | } |
566 | 159 | 171 | ||
567 | @@ -216,6 +228,33 @@ | |||
568 | 216 | error_out(e) | 228 | error_out(e) |
569 | 217 | 229 | ||
570 | 218 | 230 | ||
571 | 231 | def get_os_version_codename_swift(codename): | ||
572 | 232 | '''Determine OpenStack version number of swift from codename.''' | ||
573 | 233 | for k, v in six.iteritems(SWIFT_CODENAMES): | ||
574 | 234 | if k == codename: | ||
575 | 235 | return v[-1] | ||
576 | 236 | e = 'Could not derive swift version for '\ | ||
577 | 237 | 'codename: %s' % codename | ||
578 | 238 | error_out(e) | ||
579 | 239 | |||
580 | 240 | |||
581 | 241 | def get_swift_codename(version): | ||
582 | 242 | '''Determine OpenStack codename that corresponds to swift version.''' | ||
583 | 243 | codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] | ||
584 | 244 | if len(codenames) > 1: | ||
585 | 245 | # If more than one release codename contains this version we determine | ||
586 | 246 | # the actual codename based on the highest available install source. | ||
587 | 247 | for codename in reversed(codenames): | ||
588 | 248 | releases = UBUNTU_OPENSTACK_RELEASE | ||
589 | 249 | release = [k for k, v in six.iteritems(releases) if codename in v] | ||
590 | 250 | ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) | ||
591 | 251 | if codename in ret or release[0] in ret: | ||
592 | 252 | return codename | ||
593 | 253 | elif len(codenames) == 1: | ||
594 | 254 | return codenames[0] | ||
595 | 255 | return None | ||
596 | 256 | |||
597 | 257 | |||
598 | 219 | def get_os_codename_package(package, fatal=True): | 258 | def get_os_codename_package(package, fatal=True): |
599 | 220 | '''Derive OpenStack release codename from an installed package.''' | 259 | '''Derive OpenStack release codename from an installed package.''' |
600 | 221 | import apt_pkg as apt | 260 | import apt_pkg as apt |
601 | @@ -240,7 +279,14 @@ | |||
602 | 240 | error_out(e) | 279 | error_out(e) |
603 | 241 | 280 | ||
604 | 242 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 281 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
606 | 243 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) | 282 | if 'swift' in pkg.name: |
607 | 283 | # Fully x.y.z match for swift versions | ||
608 | 284 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) | ||
609 | 285 | else: | ||
610 | 286 | # x.y match only for 20XX.X | ||
611 | 287 | # and ignore patch level for other packages | ||
612 | 288 | match = re.match('^(\d+)\.(\d+)', vers) | ||
613 | 289 | |||
614 | 244 | if match: | 290 | if match: |
615 | 245 | vers = match.group(0) | 291 | vers = match.group(0) |
616 | 246 | 292 | ||
617 | @@ -252,13 +298,8 @@ | |||
618 | 252 | # < Liberty co-ordinated project versions | 298 | # < Liberty co-ordinated project versions |
619 | 253 | try: | 299 | try: |
620 | 254 | if 'swift' in pkg.name: | 300 | if 'swift' in pkg.name: |
626 | 255 | swift_vers = vers[:5] | 301 | return get_swift_codename(vers) |
622 | 256 | if swift_vers not in SWIFT_CODENAMES: | ||
623 | 257 | # Deal with 1.10.0 upward | ||
624 | 258 | swift_vers = vers[:6] | ||
625 | 259 | return SWIFT_CODENAMES[swift_vers] | ||
627 | 260 | else: | 302 | else: |
628 | 261 | vers = vers[:6] | ||
629 | 262 | return OPENSTACK_CODENAMES[vers] | 303 | return OPENSTACK_CODENAMES[vers] |
630 | 263 | except KeyError: | 304 | except KeyError: |
631 | 264 | if not fatal: | 305 | if not fatal: |
632 | @@ -276,12 +317,14 @@ | |||
633 | 276 | 317 | ||
634 | 277 | if 'swift' in pkg: | 318 | if 'swift' in pkg: |
635 | 278 | vers_map = SWIFT_CODENAMES | 319 | vers_map = SWIFT_CODENAMES |
636 | 320 | for cname, version in six.iteritems(vers_map): | ||
637 | 321 | if cname == codename: | ||
638 | 322 | return version[-1] | ||
639 | 279 | else: | 323 | else: |
640 | 280 | vers_map = OPENSTACK_CODENAMES | 324 | vers_map = OPENSTACK_CODENAMES |
645 | 281 | 325 | for version, cname in six.iteritems(vers_map): | |
646 | 282 | for version, cname in six.iteritems(vers_map): | 326 | if cname == codename: |
647 | 283 | if cname == codename: | 327 | return version |
644 | 284 | return version | ||
648 | 285 | # e = "Could not determine OpenStack version for package: %s" % pkg | 328 | # e = "Could not determine OpenStack version for package: %s" % pkg |
649 | 286 | # error_out(e) | 329 | # error_out(e) |
650 | 287 | 330 | ||
651 | @@ -306,12 +349,42 @@ | |||
652 | 306 | 349 | ||
653 | 307 | 350 | ||
654 | 308 | def import_key(keyid): | 351 | def import_key(keyid): |
661 | 309 | cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ | 352 | key = keyid.strip() |
662 | 310 | "--recv-keys %s" % keyid | 353 | if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and |
663 | 311 | try: | 354 | key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): |
664 | 312 | subprocess.check_call(cmd.split(' ')) | 355 | juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) |
665 | 313 | except subprocess.CalledProcessError: | 356 | juju_log("Importing ASCII Armor PGP key", level=DEBUG) |
666 | 314 | error_out("Error importing repo key %s" % keyid) | 357 | with tempfile.NamedTemporaryFile() as keyfile: |
667 | 358 | with open(keyfile.name, 'w') as fd: | ||
668 | 359 | fd.write(key) | ||
669 | 360 | fd.write("\n") | ||
670 | 361 | |||
671 | 362 | cmd = ['apt-key', 'add', keyfile.name] | ||
672 | 363 | try: | ||
673 | 364 | subprocess.check_call(cmd) | ||
674 | 365 | except subprocess.CalledProcessError: | ||
675 | 366 | error_out("Error importing PGP key '%s'" % key) | ||
676 | 367 | else: | ||
677 | 368 | juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) | ||
678 | 369 | juju_log("Importing PGP key from keyserver", level=DEBUG) | ||
679 | 370 | cmd = ['apt-key', 'adv', '--keyserver', | ||
680 | 371 | 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] | ||
681 | 372 | try: | ||
682 | 373 | subprocess.check_call(cmd) | ||
683 | 374 | except subprocess.CalledProcessError: | ||
684 | 375 | error_out("Error importing PGP key '%s'" % key) | ||
685 | 376 | |||
686 | 377 | |||
687 | 378 | def get_source_and_pgp_key(input): | ||
688 | 379 | """Look for a pgp key ID or ascii-armor key in the given input.""" | ||
689 | 380 | index = input.strip() | ||
690 | 381 | index = input.rfind('|') | ||
691 | 382 | if index < 0: | ||
692 | 383 | return input, None | ||
693 | 384 | |||
694 | 385 | key = input[index + 1:].strip('|') | ||
695 | 386 | source = input[:index] | ||
696 | 387 | return source, key | ||
697 | 315 | 388 | ||
698 | 316 | 389 | ||
699 | 317 | def configure_installation_source(rel): | 390 | def configure_installation_source(rel): |
700 | @@ -323,16 +396,16 @@ | |||
701 | 323 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 396 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
702 | 324 | f.write(DISTRO_PROPOSED % ubuntu_rel) | 397 | f.write(DISTRO_PROPOSED % ubuntu_rel) |
703 | 325 | elif rel[:4] == "ppa:": | 398 | elif rel[:4] == "ppa:": |
705 | 326 | src = rel | 399 | src, key = get_source_and_pgp_key(rel) |
706 | 400 | if key: | ||
707 | 401 | import_key(key) | ||
708 | 402 | |||
709 | 327 | subprocess.check_call(["add-apt-repository", "-y", src]) | 403 | subprocess.check_call(["add-apt-repository", "-y", src]) |
710 | 328 | elif rel[:3] == "deb": | 404 | elif rel[:3] == "deb": |
715 | 329 | l = len(rel.split('|')) | 405 | src, key = get_source_and_pgp_key(rel) |
716 | 330 | if l == 2: | 406 | if key: |
713 | 331 | src, key = rel.split('|') | ||
714 | 332 | juju_log("Importing PPA key from keyserver for %s" % src) | ||
717 | 333 | import_key(key) | 407 | import_key(key) |
720 | 334 | elif l == 1: | 408 | |
719 | 335 | src = rel | ||
721 | 336 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | 409 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
722 | 337 | f.write(src) | 410 | f.write(src) |
723 | 338 | elif rel[:6] == 'cloud:': | 411 | elif rel[:6] == 'cloud:': |
724 | @@ -377,6 +450,9 @@ | |||
725 | 377 | 'liberty': 'trusty-updates/liberty', | 450 | 'liberty': 'trusty-updates/liberty', |
726 | 378 | 'liberty/updates': 'trusty-updates/liberty', | 451 | 'liberty/updates': 'trusty-updates/liberty', |
727 | 379 | 'liberty/proposed': 'trusty-proposed/liberty', | 452 | 'liberty/proposed': 'trusty-proposed/liberty', |
728 | 453 | 'mitaka': 'trusty-updates/mitaka', | ||
729 | 454 | 'mitaka/updates': 'trusty-updates/mitaka', | ||
730 | 455 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
731 | 380 | } | 456 | } |
732 | 381 | 457 | ||
733 | 382 | try: | 458 | try: |
734 | @@ -444,11 +520,16 @@ | |||
735 | 444 | cur_vers = get_os_version_package(package) | 520 | cur_vers = get_os_version_package(package) |
736 | 445 | if "swift" in package: | 521 | if "swift" in package: |
737 | 446 | codename = get_os_codename_install_source(src) | 522 | codename = get_os_codename_install_source(src) |
739 | 447 | available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) | 523 | avail_vers = get_os_version_codename_swift(codename) |
740 | 448 | else: | 524 | else: |
742 | 449 | available_vers = get_os_version_install_source(src) | 525 | avail_vers = get_os_version_install_source(src) |
743 | 450 | apt.init() | 526 | apt.init() |
745 | 451 | return apt.version_compare(available_vers, cur_vers) == 1 | 527 | if "swift" in package: |
746 | 528 | major_cur_vers = cur_vers.split('.', 1)[0] | ||
747 | 529 | major_avail_vers = avail_vers.split('.', 1)[0] | ||
748 | 530 | major_diff = apt.version_compare(major_avail_vers, major_cur_vers) | ||
749 | 531 | return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) | ||
750 | 532 | return apt.version_compare(avail_vers, cur_vers) == 1 | ||
751 | 452 | 533 | ||
752 | 453 | 534 | ||
753 | 454 | def ensure_block_device(block_device): | 535 | def ensure_block_device(block_device): |
754 | @@ -577,7 +658,7 @@ | |||
755 | 577 | return yaml.load(projects_yaml) | 658 | return yaml.load(projects_yaml) |
756 | 578 | 659 | ||
757 | 579 | 660 | ||
759 | 580 | def git_clone_and_install(projects_yaml, core_project, depth=1): | 661 | def git_clone_and_install(projects_yaml, core_project): |
760 | 581 | """ | 662 | """ |
761 | 582 | Clone/install all specified OpenStack repositories. | 663 | Clone/install all specified OpenStack repositories. |
762 | 583 | 664 | ||
763 | @@ -627,6 +708,9 @@ | |||
764 | 627 | for p in projects['repositories']: | 708 | for p in projects['repositories']: |
765 | 628 | repo = p['repository'] | 709 | repo = p['repository'] |
766 | 629 | branch = p['branch'] | 710 | branch = p['branch'] |
767 | 711 | depth = '1' | ||
768 | 712 | if 'depth' in p.keys(): | ||
769 | 713 | depth = p['depth'] | ||
770 | 630 | if p['name'] == 'requirements': | 714 | if p['name'] == 'requirements': |
771 | 631 | repo_dir = _git_clone_and_install_single(repo, branch, depth, | 715 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
772 | 632 | parent_dir, http_proxy, | 716 | parent_dir, http_proxy, |
773 | @@ -671,19 +755,13 @@ | |||
774 | 671 | """ | 755 | """ |
775 | 672 | Clone and install a single git repository. | 756 | Clone and install a single git repository. |
776 | 673 | """ | 757 | """ |
777 | 674 | dest_dir = os.path.join(parent_dir, os.path.basename(repo)) | ||
778 | 675 | |||
779 | 676 | if not os.path.exists(parent_dir): | 758 | if not os.path.exists(parent_dir): |
780 | 677 | juju_log('Directory already exists at {}. ' | 759 | juju_log('Directory already exists at {}. ' |
781 | 678 | 'No need to create directory.'.format(parent_dir)) | 760 | 'No need to create directory.'.format(parent_dir)) |
782 | 679 | os.mkdir(parent_dir) | 761 | os.mkdir(parent_dir) |
783 | 680 | 762 | ||
790 | 681 | if not os.path.exists(dest_dir): | 763 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
791 | 682 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) | 764 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) |
786 | 683 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch, | ||
787 | 684 | depth=depth) | ||
788 | 685 | else: | ||
789 | 686 | repo_dir = dest_dir | ||
792 | 687 | 765 | ||
793 | 688 | venv = os.path.join(parent_dir, 'venv') | 766 | venv = os.path.join(parent_dir, 'venv') |
794 | 689 | 767 | ||
795 | 690 | 768 | ||
796 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' | |||
797 | --- hooks/charmhelpers/contrib/python/packages.py 2015-11-03 12:29:06 +0000 | |||
798 | +++ hooks/charmhelpers/contrib/python/packages.py 2016-02-12 19:36:45 +0000 | |||
799 | @@ -19,20 +19,35 @@ | |||
800 | 19 | 19 | ||
801 | 20 | import os | 20 | import os |
802 | 21 | import subprocess | 21 | import subprocess |
803 | 22 | import sys | ||
804 | 22 | 23 | ||
805 | 23 | from charmhelpers.fetch import apt_install, apt_update | 24 | from charmhelpers.fetch import apt_install, apt_update |
806 | 24 | from charmhelpers.core.hookenv import charm_dir, log | 25 | from charmhelpers.core.hookenv import charm_dir, log |
807 | 25 | 26 | ||
808 | 26 | try: | ||
809 | 27 | from pip import main as pip_execute | ||
810 | 28 | except ImportError: | ||
811 | 29 | apt_update() | ||
812 | 30 | apt_install('python-pip') | ||
813 | 31 | from pip import main as pip_execute | ||
814 | 32 | |||
815 | 33 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | 27 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" |
816 | 34 | 28 | ||
817 | 35 | 29 | ||
818 | 30 | def pip_execute(*args, **kwargs): | ||
819 | 31 | """Overriden pip_execute() to stop sys.path being changed. | ||
820 | 32 | |||
821 | 33 | The act of importing main from the pip module seems to cause add wheels | ||
822 | 34 | from the /usr/share/python-wheels which are installed by various tools. | ||
823 | 35 | This function ensures that sys.path remains the same after the call is | ||
824 | 36 | executed. | ||
825 | 37 | """ | ||
826 | 38 | try: | ||
827 | 39 | _path = sys.path | ||
828 | 40 | try: | ||
829 | 41 | from pip import main as _pip_execute | ||
830 | 42 | except ImportError: | ||
831 | 43 | apt_update() | ||
832 | 44 | apt_install('python-pip') | ||
833 | 45 | from pip import main as _pip_execute | ||
834 | 46 | _pip_execute(*args, **kwargs) | ||
835 | 47 | finally: | ||
836 | 48 | sys.path = _path | ||
837 | 49 | |||
838 | 50 | |||
839 | 36 | def parse_options(given, available): | 51 | def parse_options(given, available): |
840 | 37 | """Given a set of options, check if available""" | 52 | """Given a set of options, check if available""" |
841 | 38 | for key, value in sorted(given.items()): | 53 | for key, value in sorted(given.items()): |
842 | @@ -42,8 +57,12 @@ | |||
843 | 42 | yield "--{0}={1}".format(key, value) | 57 | yield "--{0}={1}".format(key, value) |
844 | 43 | 58 | ||
845 | 44 | 59 | ||
848 | 45 | def pip_install_requirements(requirements, **options): | 60 | def pip_install_requirements(requirements, constraints=None, **options): |
849 | 46 | """Install a requirements file """ | 61 | """Install a requirements file. |
850 | 62 | |||
851 | 63 | :param constraints: Path to pip constraints file. | ||
852 | 64 | http://pip.readthedocs.org/en/stable/user_guide/#constraints-files | ||
853 | 65 | """ | ||
854 | 47 | command = ["install"] | 66 | command = ["install"] |
855 | 48 | 67 | ||
856 | 49 | available_options = ('proxy', 'src', 'log', ) | 68 | available_options = ('proxy', 'src', 'log', ) |
857 | @@ -51,8 +70,13 @@ | |||
858 | 51 | command.append(option) | 70 | command.append(option) |
859 | 52 | 71 | ||
860 | 53 | command.append("-r {0}".format(requirements)) | 72 | command.append("-r {0}".format(requirements)) |
863 | 54 | log("Installing from file: {} with options: {}".format(requirements, | 73 | if constraints: |
864 | 55 | command)) | 74 | command.append("-c {0}".format(constraints)) |
865 | 75 | log("Installing from file: {} with constraints {} " | ||
866 | 76 | "and options: {}".format(requirements, constraints, command)) | ||
867 | 77 | else: | ||
868 | 78 | log("Installing from file: {} with options: {}".format(requirements, | ||
869 | 79 | command)) | ||
870 | 56 | pip_execute(command) | 80 | pip_execute(command) |
871 | 57 | 81 | ||
872 | 58 | 82 | ||
873 | 59 | 83 | ||
874 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
875 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-11-03 12:29:06 +0000 | |||
876 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-02-12 19:36:45 +0000 | |||
877 | @@ -23,6 +23,8 @@ | |||
878 | 23 | # James Page <james.page@ubuntu.com> | 23 | # James Page <james.page@ubuntu.com> |
879 | 24 | # Adam Gandelman <adamg@ubuntu.com> | 24 | # Adam Gandelman <adamg@ubuntu.com> |
880 | 25 | # | 25 | # |
881 | 26 | import bisect | ||
882 | 27 | import six | ||
883 | 26 | 28 | ||
884 | 27 | import os | 29 | import os |
885 | 28 | import shutil | 30 | import shutil |
886 | @@ -72,6 +74,394 @@ | |||
887 | 72 | err to syslog = {use_syslog} | 74 | err to syslog = {use_syslog} |
888 | 73 | clog to syslog = {use_syslog} | 75 | clog to syslog = {use_syslog} |
889 | 74 | """ | 76 | """ |
890 | 77 | # For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) | ||
891 | 78 | powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] | ||
892 | 79 | |||
893 | 80 | |||
894 | 81 | def validator(value, valid_type, valid_range=None): | ||
895 | 82 | """ | ||
896 | 83 | Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values | ||
897 | 84 | Example input: | ||
898 | 85 | validator(value=1, | ||
899 | 86 | valid_type=int, | ||
900 | 87 | valid_range=[0, 2]) | ||
901 | 88 | This says I'm testing value=1. It must be an int inclusive in [0,2] | ||
902 | 89 | |||
903 | 90 | :param value: The value to validate | ||
904 | 91 | :param valid_type: The type that value should be. | ||
905 | 92 | :param valid_range: A range of values that value can assume. | ||
906 | 93 | :return: | ||
907 | 94 | """ | ||
908 | 95 | assert isinstance(value, valid_type), "{} is not a {}".format( | ||
909 | 96 | value, | ||
910 | 97 | valid_type) | ||
911 | 98 | if valid_range is not None: | ||
912 | 99 | assert isinstance(valid_range, list), \ | ||
913 | 100 | "valid_range must be a list, was given {}".format(valid_range) | ||
914 | 101 | # If we're dealing with strings | ||
915 | 102 | if valid_type is six.string_types: | ||
916 | 103 | assert value in valid_range, \ | ||
917 | 104 | "{} is not in the list {}".format(value, valid_range) | ||
918 | 105 | # Integer, float should have a min and max | ||
919 | 106 | else: | ||
920 | 107 | if len(valid_range) != 2: | ||
921 | 108 | raise ValueError( | ||
922 | 109 | "Invalid valid_range list of {} for {}. " | ||
923 | 110 | "List must be [min,max]".format(valid_range, value)) | ||
924 | 111 | assert value >= valid_range[0], \ | ||
925 | 112 | "{} is less than minimum allowed value of {}".format( | ||
926 | 113 | value, valid_range[0]) | ||
927 | 114 | assert value <= valid_range[1], \ | ||
928 | 115 | "{} is greater than maximum allowed value of {}".format( | ||
929 | 116 | value, valid_range[1]) | ||
930 | 117 | |||
931 | 118 | |||
932 | 119 | class PoolCreationError(Exception): | ||
933 | 120 | """ | ||
934 | 121 | A custom error to inform the caller that a pool creation failed. Provides an error message | ||
935 | 122 | """ | ||
936 | 123 | def __init__(self, message): | ||
937 | 124 | super(PoolCreationError, self).__init__(message) | ||
938 | 125 | |||
939 | 126 | |||
940 | 127 | class Pool(object): | ||
941 | 128 | """ | ||
942 | 129 | An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. | ||
943 | 130 | Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). | ||
944 | 131 | """ | ||
945 | 132 | def __init__(self, service, name): | ||
946 | 133 | self.service = service | ||
947 | 134 | self.name = name | ||
948 | 135 | |||
949 | 136 | # Create the pool if it doesn't exist already | ||
950 | 137 | # To be implemented by subclasses | ||
951 | 138 | def create(self): | ||
952 | 139 | pass | ||
953 | 140 | |||
954 | 141 | def add_cache_tier(self, cache_pool, mode): | ||
955 | 142 | """ | ||
956 | 143 | Adds a new cache tier to an existing pool. | ||
957 | 144 | :param cache_pool: six.string_types. The cache tier pool name to add. | ||
958 | 145 | :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] | ||
959 | 146 | :return: None | ||
960 | 147 | """ | ||
961 | 148 | # Check the input types and values | ||
962 | 149 | validator(value=cache_pool, valid_type=six.string_types) | ||
963 | 150 | validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) | ||
964 | 151 | |||
965 | 152 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) | ||
966 | 153 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) | ||
967 | 154 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) | ||
968 | 155 | check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) | ||
969 | 156 | |||
970 | 157 | def remove_cache_tier(self, cache_pool): | ||
971 | 158 | """ | ||
972 | 159 | Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. | ||
973 | 160 | :param cache_pool: six.string_types. The cache tier pool name to remove. | ||
974 | 161 | :return: None | ||
975 | 162 | """ | ||
976 | 163 | # read-only is easy, writeback is much harder | ||
977 | 164 | mode = get_cache_mode(cache_pool) | ||
978 | 165 | if mode == 'readonly': | ||
979 | 166 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) | ||
980 | 167 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) | ||
981 | 168 | |||
982 | 169 | elif mode == 'writeback': | ||
983 | 170 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) | ||
984 | 171 | # Flush the cache and wait for it to return | ||
985 | 172 | check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) | ||
986 | 173 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) | ||
987 | 174 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) | ||
988 | 175 | |||
989 | 176 | def get_pgs(self, pool_size): | ||
990 | 177 | """ | ||
991 | 178 | :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for | ||
992 | 179 | erasure coded pools | ||
993 | 180 | :return: int. The number of pgs to use. | ||
994 | 181 | """ | ||
995 | 182 | validator(value=pool_size, valid_type=int) | ||
996 | 183 | osds = get_osds(self.service) | ||
997 | 184 | if not osds: | ||
998 | 185 | # NOTE(james-page): Default to 200 for older ceph versions | ||
999 | 186 | # which don't support OSD query from cli | ||
1000 | 187 | return 200 | ||
1001 | 188 | |||
1002 | 189 | # Calculate based on Ceph best practices | ||
1003 | 190 | if osds < 5: | ||
1004 | 191 | return 128 | ||
1005 | 192 | elif 5 < osds < 10: | ||
1006 | 193 | return 512 | ||
1007 | 194 | elif 10 < osds < 50: | ||
1008 | 195 | return 4096 | ||
1009 | 196 | else: | ||
1010 | 197 | estimate = (osds * 100) / pool_size | ||
1011 | 198 | # Return the next nearest power of 2 | ||
1012 | 199 | index = bisect.bisect_right(powers_of_two, estimate) | ||
1013 | 200 | return powers_of_two[index] | ||
1014 | 201 | |||
1015 | 202 | |||
1016 | 203 | class ReplicatedPool(Pool): | ||
1017 | 204 | def __init__(self, service, name, replicas=2): | ||
1018 | 205 | super(ReplicatedPool, self).__init__(service=service, name=name) | ||
1019 | 206 | self.replicas = replicas | ||
1020 | 207 | |||
1021 | 208 | def create(self): | ||
1022 | 209 | if not pool_exists(self.service, self.name): | ||
1023 | 210 | # Create it | ||
1024 | 211 | pgs = self.get_pgs(self.replicas) | ||
1025 | 212 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] | ||
1026 | 213 | try: | ||
1027 | 214 | check_call(cmd) | ||
1028 | 215 | except CalledProcessError: | ||
1029 | 216 | raise | ||
1030 | 217 | |||
1031 | 218 | |||
1032 | 219 | # Default jerasure erasure coded pool | ||
1033 | 220 | class ErasurePool(Pool): | ||
1034 | 221 | def __init__(self, service, name, erasure_code_profile="default"): | ||
1035 | 222 | super(ErasurePool, self).__init__(service=service, name=name) | ||
1036 | 223 | self.erasure_code_profile = erasure_code_profile | ||
1037 | 224 | |||
1038 | 225 | def create(self): | ||
1039 | 226 | if not pool_exists(self.service, self.name): | ||
1040 | 227 | # Try to find the erasure profile information so we can properly size the pgs | ||
1041 | 228 | erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) | ||
1042 | 229 | |||
1043 | 230 | # Check for errors | ||
1044 | 231 | if erasure_profile is None: | ||
1045 | 232 | log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), | ||
1046 | 233 | level=ERROR) | ||
1047 | 234 | raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) | ||
1048 | 235 | if 'k' not in erasure_profile or 'm' not in erasure_profile: | ||
1049 | 236 | # Error | ||
1050 | 237 | log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), | ||
1051 | 238 | level=ERROR) | ||
1052 | 239 | raise PoolCreationError( | ||
1053 | 240 | message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) | ||
1054 | 241 | |||
1055 | 242 | pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) | ||
1056 | 243 | # Create it | ||
1057 | 244 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), | ||
1058 | 245 | 'erasure', self.erasure_code_profile] | ||
1059 | 246 | try: | ||
1060 | 247 | check_call(cmd) | ||
1061 | 248 | except CalledProcessError: | ||
1062 | 249 | raise | ||
1063 | 250 | |||
1064 | 251 | """Get an existing erasure code profile if it already exists. | ||
1065 | 252 | Returns json formatted output""" | ||
1066 | 253 | |||
1067 | 254 | |||
1068 | 255 | def get_erasure_profile(service, name): | ||
1069 | 256 | """ | ||
1070 | 257 | :param service: six.string_types. The Ceph user name to run the command under | ||
1071 | 258 | :param name: | ||
1072 | 259 | :return: | ||
1073 | 260 | """ | ||
1074 | 261 | try: | ||
1075 | 262 | out = check_output(['ceph', '--id', service, | ||
1076 | 263 | 'osd', 'erasure-code-profile', 'get', | ||
1077 | 264 | name, '--format=json']) | ||
1078 | 265 | return json.loads(out) | ||
1079 | 266 | except (CalledProcessError, OSError, ValueError): | ||
1080 | 267 | return None | ||
1081 | 268 | |||
1082 | 269 | |||
1083 | 270 | def pool_set(service, pool_name, key, value): | ||
1084 | 271 | """ | ||
1085 | 272 | Sets a value for a RADOS pool in ceph. | ||
1086 | 273 | :param service: six.string_types. The Ceph user name to run the command under | ||
1087 | 274 | :param pool_name: six.string_types | ||
1088 | 275 | :param key: six.string_types | ||
1089 | 276 | :param value: | ||
1090 | 277 | :return: None. Can raise CalledProcessError | ||
1091 | 278 | """ | ||
1092 | 279 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] | ||
1093 | 280 | try: | ||
1094 | 281 | check_call(cmd) | ||
1095 | 282 | except CalledProcessError: | ||
1096 | 283 | raise | ||
1097 | 284 | |||
1098 | 285 | |||
1099 | 286 | def snapshot_pool(service, pool_name, snapshot_name): | ||
1100 | 287 | """ | ||
1101 | 288 | Snapshots a RADOS pool in ceph. | ||
1102 | 289 | :param service: six.string_types. The Ceph user name to run the command under | ||
1103 | 290 | :param pool_name: six.string_types | ||
1104 | 291 | :param snapshot_name: six.string_types | ||
1105 | 292 | :return: None. Can raise CalledProcessError | ||
1106 | 293 | """ | ||
1107 | 294 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] | ||
1108 | 295 | try: | ||
1109 | 296 | check_call(cmd) | ||
1110 | 297 | except CalledProcessError: | ||
1111 | 298 | raise | ||
1112 | 299 | |||
1113 | 300 | |||
1114 | 301 | def remove_pool_snapshot(service, pool_name, snapshot_name): | ||
1115 | 302 | """ | ||
1116 | 303 | Remove a snapshot from a RADOS pool in ceph. | ||
1117 | 304 | :param service: six.string_types. The Ceph user name to run the command under | ||
1118 | 305 | :param pool_name: six.string_types | ||
1119 | 306 | :param snapshot_name: six.string_types | ||
1120 | 307 | :return: None. Can raise CalledProcessError | ||
1121 | 308 | """ | ||
1122 | 309 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] | ||
1123 | 310 | try: | ||
1124 | 311 | check_call(cmd) | ||
1125 | 312 | except CalledProcessError: | ||
1126 | 313 | raise | ||
1127 | 314 | |||
1128 | 315 | |||
1129 | 316 | # max_bytes should be an int or long | ||
1130 | 317 | def set_pool_quota(service, pool_name, max_bytes): | ||
1131 | 318 | """ | ||
1132 | 319 | :param service: six.string_types. The Ceph user name to run the command under | ||
1133 | 320 | :param pool_name: six.string_types | ||
1134 | 321 | :param max_bytes: int or long | ||
1135 | 322 | :return: None. Can raise CalledProcessError | ||
1136 | 323 | """ | ||
1137 | 324 | # Set a byte quota on a RADOS pool in ceph. | ||
1138 | 325 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] | ||
1139 | 326 | try: | ||
1140 | 327 | check_call(cmd) | ||
1141 | 328 | except CalledProcessError: | ||
1142 | 329 | raise | ||
1143 | 330 | |||
1144 | 331 | |||
1145 | 332 | def remove_pool_quota(service, pool_name): | ||
1146 | 333 | """ | ||
1147 | 334 | Set a byte quota on a RADOS pool in ceph. | ||
1148 | 335 | :param service: six.string_types. The Ceph user name to run the command under | ||
1149 | 336 | :param pool_name: six.string_types | ||
1150 | 337 | :return: None. Can raise CalledProcessError | ||
1151 | 338 | """ | ||
1152 | 339 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] | ||
1153 | 340 | try: | ||
1154 | 341 | check_call(cmd) | ||
1155 | 342 | except CalledProcessError: | ||
1156 | 343 | raise | ||
1157 | 344 | |||
1158 | 345 | |||
1159 | 346 | def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', | ||
1160 | 347 | data_chunks=2, coding_chunks=1, | ||
1161 | 348 | locality=None, durability_estimator=None): | ||
1162 | 349 | """ | ||
1163 | 350 | Create a new erasure code profile if one does not already exist for it. Updates | ||
1164 | 351 | the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ | ||
1165 | 352 | for more details | ||
1166 | 353 | :param service: six.string_types. The Ceph user name to run the command under | ||
1167 | 354 | :param profile_name: six.string_types | ||
1168 | 355 | :param erasure_plugin_name: six.string_types | ||
1169 | 356 | :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', | ||
1170 | 357 | 'room', 'root', 'row']) | ||
1171 | 358 | :param data_chunks: int | ||
1172 | 359 | :param coding_chunks: int | ||
1173 | 360 | :param locality: int | ||
1174 | 361 | :param durability_estimator: int | ||
1175 | 362 | :return: None. Can raise CalledProcessError | ||
1176 | 363 | """ | ||
1177 | 364 | # Ensure this failure_domain is allowed by Ceph | ||
1178 | 365 | validator(failure_domain, six.string_types, | ||
1179 | 366 | ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) | ||
1180 | 367 | |||
1181 | 368 | cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, | ||
1182 | 369 | 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), | ||
1183 | 370 | 'ruleset_failure_domain=' + failure_domain] | ||
1184 | 371 | if locality is not None and durability_estimator is not None: | ||
1185 | 372 | raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") | ||
1186 | 373 | |||
1187 | 374 | # Add plugin specific information | ||
1188 | 375 | if locality is not None: | ||
1189 | 376 | # For local erasure codes | ||
1190 | 377 | cmd.append('l=' + str(locality)) | ||
1191 | 378 | if durability_estimator is not None: | ||
1192 | 379 | # For Shec erasure codes | ||
1193 | 380 | cmd.append('c=' + str(durability_estimator)) | ||
1194 | 381 | |||
1195 | 382 | if erasure_profile_exists(service, profile_name): | ||
1196 | 383 | cmd.append('--force') | ||
1197 | 384 | |||
1198 | 385 | try: | ||
1199 | 386 | check_call(cmd) | ||
1200 | 387 | except CalledProcessError: | ||
1201 | 388 | raise | ||
1202 | 389 | |||
1203 | 390 | |||
1204 | 391 | def rename_pool(service, old_name, new_name): | ||
1205 | 392 | """ | ||
1206 | 393 | Rename a Ceph pool from old_name to new_name | ||
1207 | 394 | :param service: six.string_types. The Ceph user name to run the command under | ||
1208 | 395 | :param old_name: six.string_types | ||
1209 | 396 | :param new_name: six.string_types | ||
1210 | 397 | :return: None | ||
1211 | 398 | """ | ||
1212 | 399 | validator(value=old_name, valid_type=six.string_types) | ||
1213 | 400 | validator(value=new_name, valid_type=six.string_types) | ||
1214 | 401 | |||
1215 | 402 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] | ||
1216 | 403 | check_call(cmd) | ||
1217 | 404 | |||
1218 | 405 | |||
1219 | 406 | def erasure_profile_exists(service, name): | ||
1220 | 407 | """ | ||
1221 | 408 | Check to see if an Erasure code profile already exists. | ||
1222 | 409 | :param service: six.string_types. The Ceph user name to run the command under | ||
1223 | 410 | :param name: six.string_types | ||
1224 | 411 | :return: int or None | ||
1225 | 412 | """ | ||
1226 | 413 | validator(value=name, valid_type=six.string_types) | ||
1227 | 414 | try: | ||
1228 | 415 | check_call(['ceph', '--id', service, | ||
1229 | 416 | 'osd', 'erasure-code-profile', 'get', | ||
1230 | 417 | name]) | ||
1231 | 418 | return True | ||
1232 | 419 | except CalledProcessError: | ||
1233 | 420 | return False | ||
1234 | 421 | |||
1235 | 422 | |||
1236 | 423 | def get_cache_mode(service, pool_name): | ||
1237 | 424 | """ | ||
1238 | 425 | Find the current caching mode of the pool_name given. | ||
1239 | 426 | :param service: six.string_types. The Ceph user name to run the command under | ||
1240 | 427 | :param pool_name: six.string_types | ||
1241 | 428 | :return: int or None | ||
1242 | 429 | """ | ||
1243 | 430 | validator(value=service, valid_type=six.string_types) | ||
1244 | 431 | validator(value=pool_name, valid_type=six.string_types) | ||
1245 | 432 | out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) | ||
1246 | 433 | try: | ||
1247 | 434 | osd_json = json.loads(out) | ||
1248 | 435 | for pool in osd_json['pools']: | ||
1249 | 436 | if pool['pool_name'] == pool_name: | ||
1250 | 437 | return pool['cache_mode'] | ||
1251 | 438 | return None | ||
1252 | 439 | except ValueError: | ||
1253 | 440 | raise | ||
1254 | 441 | |||
1255 | 442 | |||
1256 | 443 | def pool_exists(service, name): | ||
1257 | 444 | """Check to see if a RADOS pool already exists.""" | ||
1258 | 445 | try: | ||
1259 | 446 | out = check_output(['rados', '--id', service, | ||
1260 | 447 | 'lspools']).decode('UTF-8') | ||
1261 | 448 | except CalledProcessError: | ||
1262 | 449 | return False | ||
1263 | 450 | |||
1264 | 451 | return name in out | ||
1265 | 452 | |||
1266 | 453 | |||
1267 | 454 | def get_osds(service): | ||
1268 | 455 | """Return a list of all Ceph Object Storage Daemons currently in the | ||
1269 | 456 | cluster. | ||
1270 | 457 | """ | ||
1271 | 458 | version = ceph_version() | ||
1272 | 459 | if version and version >= '0.56': | ||
1273 | 460 | return json.loads(check_output(['ceph', '--id', service, | ||
1274 | 461 | 'osd', 'ls', | ||
1275 | 462 | '--format=json']).decode('UTF-8')) | ||
1276 | 463 | |||
1277 | 464 | return None | ||
1278 | 75 | 465 | ||
1279 | 76 | 466 | ||
1280 | 77 | def install(): | 467 | def install(): |
1281 | @@ -101,53 +491,37 @@ | |||
1282 | 101 | check_call(cmd) | 491 | check_call(cmd) |
1283 | 102 | 492 | ||
1284 | 103 | 493 | ||
1310 | 104 | def pool_exists(service, name): | 494 | def update_pool(client, pool, settings): |
1311 | 105 | """Check to see if a RADOS pool already exists.""" | 495 | cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] |
1312 | 106 | try: | 496 | for k, v in six.iteritems(settings): |
1313 | 107 | out = check_output(['rados', '--id', service, | 497 | cmd.append(k) |
1314 | 108 | 'lspools']).decode('UTF-8') | 498 | cmd.append(v) |
1315 | 109 | except CalledProcessError: | 499 | |
1316 | 110 | return False | 500 | check_call(cmd) |
1317 | 111 | 501 | ||
1318 | 112 | return name in out | 502 | |
1319 | 113 | 503 | def create_pool(service, name, replicas=3, pg_num=None): | |
1295 | 114 | |||
1296 | 115 | def get_osds(service): | ||
1297 | 116 | """Return a list of all Ceph Object Storage Daemons currently in the | ||
1298 | 117 | cluster. | ||
1299 | 118 | """ | ||
1300 | 119 | version = ceph_version() | ||
1301 | 120 | if version and version >= '0.56': | ||
1302 | 121 | return json.loads(check_output(['ceph', '--id', service, | ||
1303 | 122 | 'osd', 'ls', | ||
1304 | 123 | '--format=json']).decode('UTF-8')) | ||
1305 | 124 | |||
1306 | 125 | return None | ||
1307 | 126 | |||
1308 | 127 | |||
1309 | 128 | def create_pool(service, name, replicas=3): | ||
1320 | 129 | """Create a new RADOS pool.""" | 504 | """Create a new RADOS pool.""" |
1321 | 130 | if pool_exists(service, name): | 505 | if pool_exists(service, name): |
1322 | 131 | log("Ceph pool {} already exists, skipping creation".format(name), | 506 | log("Ceph pool {} already exists, skipping creation".format(name), |
1323 | 132 | level=WARNING) | 507 | level=WARNING) |
1324 | 133 | return | 508 | return |
1325 | 134 | 509 | ||
1342 | 135 | # Calculate the number of placement groups based | 510 | if not pg_num: |
1343 | 136 | # on upstream recommended best practices. | 511 | # Calculate the number of placement groups based |
1344 | 137 | osds = get_osds(service) | 512 | # on upstream recommended best practices. |
1345 | 138 | if osds: | 513 | osds = get_osds(service) |
1346 | 139 | pgnum = (len(osds) * 100 // replicas) | 514 | if osds: |
1347 | 140 | else: | 515 | pg_num = (len(osds) * 100 // replicas) |
1348 | 141 | # NOTE(james-page): Default to 200 for older ceph versions | 516 | else: |
1349 | 142 | # which don't support OSD query from cli | 517 | # NOTE(james-page): Default to 200 for older ceph versions |
1350 | 143 | pgnum = 200 | 518 | # which don't support OSD query from cli |
1351 | 144 | 519 | pg_num = 200 | |
1352 | 145 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] | 520 | |
1353 | 146 | check_call(cmd) | 521 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] |
1354 | 147 | 522 | check_call(cmd) | |
1355 | 148 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', | 523 | |
1356 | 149 | str(replicas)] | 524 | update_pool(service, name, settings={'size': str(replicas)}) |
1341 | 150 | check_call(cmd) | ||
1357 | 151 | 525 | ||
1358 | 152 | 526 | ||
1359 | 153 | def delete_pool(service, name): | 527 | def delete_pool(service, name): |
1360 | @@ -202,10 +576,10 @@ | |||
1361 | 202 | log('Created new keyfile at %s.' % keyfile, level=INFO) | 576 | log('Created new keyfile at %s.' % keyfile, level=INFO) |
1362 | 203 | 577 | ||
1363 | 204 | 578 | ||
1366 | 205 | def get_ceph_nodes(): | 579 | def get_ceph_nodes(relation='ceph'): |
1367 | 206 | """Query named relation 'ceph' to determine current nodes.""" | 580 | """Query named relation to determine current nodes.""" |
1368 | 207 | hosts = [] | 581 | hosts = [] |
1370 | 208 | for r_id in relation_ids('ceph'): | 582 | for r_id in relation_ids(relation): |
1371 | 209 | for unit in related_units(r_id): | 583 | for unit in related_units(r_id): |
1372 | 210 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | 584 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
1373 | 211 | 585 | ||
1374 | @@ -357,14 +731,14 @@ | |||
1375 | 357 | service_start(svc) | 731 | service_start(svc) |
1376 | 358 | 732 | ||
1377 | 359 | 733 | ||
1379 | 360 | def ensure_ceph_keyring(service, user=None, group=None): | 734 | def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): |
1380 | 361 | """Ensures a ceph keyring is created for a named service and optionally | 735 | """Ensures a ceph keyring is created for a named service and optionally |
1381 | 362 | ensures user and group ownership. | 736 | ensures user and group ownership. |
1382 | 363 | 737 | ||
1383 | 364 | Returns False if no ceph key is available in relation state. | 738 | Returns False if no ceph key is available in relation state. |
1384 | 365 | """ | 739 | """ |
1385 | 366 | key = None | 740 | key = None |
1387 | 367 | for rid in relation_ids('ceph'): | 741 | for rid in relation_ids(relation): |
1388 | 368 | for unit in related_units(rid): | 742 | for unit in related_units(rid): |
1389 | 369 | key = relation_get('key', rid=rid, unit=unit) | 743 | key = relation_get('key', rid=rid, unit=unit) |
1390 | 370 | if key: | 744 | if key: |
1391 | @@ -405,6 +779,7 @@ | |||
1392 | 405 | 779 | ||
1393 | 406 | The API is versioned and defaults to version 1. | 780 | The API is versioned and defaults to version 1. |
1394 | 407 | """ | 781 | """ |
1395 | 782 | |||
1396 | 408 | def __init__(self, api_version=1, request_id=None): | 783 | def __init__(self, api_version=1, request_id=None): |
1397 | 409 | self.api_version = api_version | 784 | self.api_version = api_version |
1398 | 410 | if request_id: | 785 | if request_id: |
1399 | @@ -413,9 +788,16 @@ | |||
1400 | 413 | self.request_id = str(uuid.uuid1()) | 788 | self.request_id = str(uuid.uuid1()) |
1401 | 414 | self.ops = [] | 789 | self.ops = [] |
1402 | 415 | 790 | ||
1404 | 416 | def add_op_create_pool(self, name, replica_count=3): | 791 | def add_op_create_pool(self, name, replica_count=3, pg_num=None): |
1405 | 792 | """Adds an operation to create a pool. | ||
1406 | 793 | |||
1407 | 794 | @param pg_num setting: optional setting. If not provided, this value | ||
1408 | 795 | will be calculated by the broker based on how many OSDs are in the | ||
1409 | 796 | cluster at the time of creation. Note that, if provided, this value | ||
1410 | 797 | will be capped at the current available maximum. | ||
1411 | 798 | """ | ||
1412 | 417 | self.ops.append({'op': 'create-pool', 'name': name, | 799 | self.ops.append({'op': 'create-pool', 'name': name, |
1414 | 418 | 'replicas': replica_count}) | 800 | 'replicas': replica_count, 'pg_num': pg_num}) |
1415 | 419 | 801 | ||
1416 | 420 | def set_ops(self, ops): | 802 | def set_ops(self, ops): |
1417 | 421 | """Set request ops to provided value. | 803 | """Set request ops to provided value. |
1418 | @@ -433,8 +815,8 @@ | |||
1419 | 433 | def _ops_equal(self, other): | 815 | def _ops_equal(self, other): |
1420 | 434 | if len(self.ops) == len(other.ops): | 816 | if len(self.ops) == len(other.ops): |
1421 | 435 | for req_no in range(0, len(self.ops)): | 817 | for req_no in range(0, len(self.ops)): |
1424 | 436 | for key in ['replicas', 'name', 'op']: | 818 | for key in ['replicas', 'name', 'op', 'pg_num']: |
1425 | 437 | if self.ops[req_no][key] != other.ops[req_no][key]: | 819 | if self.ops[req_no].get(key) != other.ops[req_no].get(key): |
1426 | 438 | return False | 820 | return False |
1427 | 439 | else: | 821 | else: |
1428 | 440 | return False | 822 | return False |
1429 | @@ -540,7 +922,7 @@ | |||
1430 | 540 | return request | 922 | return request |
1431 | 541 | 923 | ||
1432 | 542 | 924 | ||
1434 | 543 | def get_request_states(request): | 925 | def get_request_states(request, relation='ceph'): |
1435 | 544 | """Return a dict of requests per relation id with their corresponding | 926 | """Return a dict of requests per relation id with their corresponding |
1436 | 545 | completion state. | 927 | completion state. |
1437 | 546 | 928 | ||
1438 | @@ -552,7 +934,7 @@ | |||
1439 | 552 | """ | 934 | """ |
1440 | 553 | complete = [] | 935 | complete = [] |
1441 | 554 | requests = {} | 936 | requests = {} |
1443 | 555 | for rid in relation_ids('ceph'): | 937 | for rid in relation_ids(relation): |
1444 | 556 | complete = False | 938 | complete = False |
1445 | 557 | previous_request = get_previous_request(rid) | 939 | previous_request = get_previous_request(rid) |
1446 | 558 | if request == previous_request: | 940 | if request == previous_request: |
1447 | @@ -570,14 +952,14 @@ | |||
1448 | 570 | return requests | 952 | return requests |
1449 | 571 | 953 | ||
1450 | 572 | 954 | ||
1452 | 573 | def is_request_sent(request): | 955 | def is_request_sent(request, relation='ceph'): |
1453 | 574 | """Check to see if a functionally equivalent request has already been sent | 956 | """Check to see if a functionally equivalent request has already been sent |
1454 | 575 | 957 | ||
1455 | 576 | Returns True if a similair request has been sent | 958 | Returns True if a similair request has been sent |
1456 | 577 | 959 | ||
1457 | 578 | @param request: A CephBrokerRq object | 960 | @param request: A CephBrokerRq object |
1458 | 579 | """ | 961 | """ |
1460 | 580 | states = get_request_states(request) | 962 | states = get_request_states(request, relation=relation) |
1461 | 581 | for rid in states.keys(): | 963 | for rid in states.keys(): |
1462 | 582 | if not states[rid]['sent']: | 964 | if not states[rid]['sent']: |
1463 | 583 | return False | 965 | return False |
1464 | @@ -585,7 +967,7 @@ | |||
1465 | 585 | return True | 967 | return True |
1466 | 586 | 968 | ||
1467 | 587 | 969 | ||
1469 | 588 | def is_request_complete(request): | 970 | def is_request_complete(request, relation='ceph'): |
1470 | 589 | """Check to see if a functionally equivalent request has already been | 971 | """Check to see if a functionally equivalent request has already been |
1471 | 590 | completed | 972 | completed |
1472 | 591 | 973 | ||
1473 | @@ -593,7 +975,7 @@ | |||
1474 | 593 | 975 | ||
1475 | 594 | @param request: A CephBrokerRq object | 976 | @param request: A CephBrokerRq object |
1476 | 595 | """ | 977 | """ |
1478 | 596 | states = get_request_states(request) | 978 | states = get_request_states(request, relation=relation) |
1479 | 597 | for rid in states.keys(): | 979 | for rid in states.keys(): |
1480 | 598 | if not states[rid]['complete']: | 980 | if not states[rid]['complete']: |
1481 | 599 | return False | 981 | return False |
1482 | @@ -643,15 +1025,15 @@ | |||
1483 | 643 | return 'broker-rsp-' + local_unit().replace('/', '-') | 1025 | return 'broker-rsp-' + local_unit().replace('/', '-') |
1484 | 644 | 1026 | ||
1485 | 645 | 1027 | ||
1487 | 646 | def send_request_if_needed(request): | 1028 | def send_request_if_needed(request, relation='ceph'): |
1488 | 647 | """Send broker request if an equivalent request has not already been sent | 1029 | """Send broker request if an equivalent request has not already been sent |
1489 | 648 | 1030 | ||
1490 | 649 | @param request: A CephBrokerRq object | 1031 | @param request: A CephBrokerRq object |
1491 | 650 | """ | 1032 | """ |
1493 | 651 | if is_request_sent(request): | 1033 | if is_request_sent(request, relation=relation): |
1494 | 652 | log('Request already sent but not complete, not sending new request', | 1034 | log('Request already sent but not complete, not sending new request', |
1495 | 653 | level=DEBUG) | 1035 | level=DEBUG) |
1496 | 654 | else: | 1036 | else: |
1498 | 655 | for rid in relation_ids('ceph'): | 1037 | for rid in relation_ids(relation): |
1499 | 656 | log('Sending request {}'.format(request.request_id), level=DEBUG) | 1038 | log('Sending request {}'.format(request.request_id), level=DEBUG) |
1500 | 657 | relation_set(relation_id=rid, broker_req=request.request) | 1039 | relation_set(relation_id=rid, broker_req=request.request) |
1501 | 658 | 1040 | ||
1502 | === modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
1503 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-06-24 12:22:08 +0000 | |||
1504 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-02-12 19:36:45 +0000 | |||
1505 | @@ -76,3 +76,13 @@ | |||
1506 | 76 | check_call(cmd) | 76 | check_call(cmd) |
1507 | 77 | 77 | ||
1508 | 78 | return create_loopback(path) | 78 | return create_loopback(path) |
1509 | 79 | |||
1510 | 80 | |||
1511 | 81 | def is_mapped_loopback_device(device): | ||
1512 | 82 | """ | ||
1513 | 83 | Checks if a given device name is an existing/mapped loopback device. | ||
1514 | 84 | :param device: str: Full path to the device (eg, /dev/loop1). | ||
1515 | 85 | :returns: str: Path to the backing file if is a loopback device | ||
1516 | 86 | empty string otherwise | ||
1517 | 87 | """ | ||
1518 | 88 | return loopback_devices().get(device, "") | ||
1519 | 79 | 89 | ||
1520 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1521 | --- hooks/charmhelpers/core/hookenv.py 2015-11-03 12:29:06 +0000 | |||
1522 | +++ hooks/charmhelpers/core/hookenv.py 2016-02-12 19:36:45 +0000 | |||
1523 | @@ -492,7 +492,7 @@ | |||
1524 | 492 | 492 | ||
1525 | 493 | @cached | 493 | @cached |
1526 | 494 | def peer_relation_id(): | 494 | def peer_relation_id(): |
1528 | 495 | '''Get a peer relation id if a peer relation has been joined, else None.''' | 495 | '''Get the peers relation id if a peers relation has been joined, else None.''' |
1529 | 496 | md = metadata() | 496 | md = metadata() |
1530 | 497 | section = md.get('peers') | 497 | section = md.get('peers') |
1531 | 498 | if section: | 498 | if section: |
1532 | @@ -517,12 +517,12 @@ | |||
1533 | 517 | def relation_to_role_and_interface(relation_name): | 517 | def relation_to_role_and_interface(relation_name): |
1534 | 518 | """ | 518 | """ |
1535 | 519 | Given the name of a relation, return the role and the name of the interface | 519 | Given the name of a relation, return the role and the name of the interface |
1537 | 520 | that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). | 520 | that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). |
1538 | 521 | 521 | ||
1539 | 522 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | 522 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
1540 | 523 | """ | 523 | """ |
1541 | 524 | _metadata = metadata() | 524 | _metadata = metadata() |
1543 | 525 | for role in ('provides', 'requires', 'peer'): | 525 | for role in ('provides', 'requires', 'peers'): |
1544 | 526 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') | 526 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
1545 | 527 | if interface: | 527 | if interface: |
1546 | 528 | return role, interface | 528 | return role, interface |
1547 | @@ -534,7 +534,7 @@ | |||
1548 | 534 | """ | 534 | """ |
1549 | 535 | Given a role and interface name, return a list of relation names for the | 535 | Given a role and interface name, return a list of relation names for the |
1550 | 536 | current charm that use that interface under that role (where role is one | 536 | current charm that use that interface under that role (where role is one |
1552 | 537 | of ``provides``, ``requires``, or ``peer``). | 537 | of ``provides``, ``requires``, or ``peers``). |
1553 | 538 | 538 | ||
1554 | 539 | :returns: A list of relation names. | 539 | :returns: A list of relation names. |
1555 | 540 | """ | 540 | """ |
1556 | @@ -555,7 +555,7 @@ | |||
1557 | 555 | :returns: A list of relation names. | 555 | :returns: A list of relation names. |
1558 | 556 | """ | 556 | """ |
1559 | 557 | results = [] | 557 | results = [] |
1561 | 558 | for role in ('provides', 'requires', 'peer'): | 558 | for role in ('provides', 'requires', 'peers'): |
1562 | 559 | results.extend(role_and_interface_to_relations(role, interface_name)) | 559 | results.extend(role_and_interface_to_relations(role, interface_name)) |
1563 | 560 | return results | 560 | return results |
1564 | 561 | 561 | ||
1565 | @@ -637,7 +637,7 @@ | |||
1566 | 637 | 637 | ||
1567 | 638 | 638 | ||
1568 | 639 | @cached | 639 | @cached |
1570 | 640 | def storage_get(attribute="", storage_id=""): | 640 | def storage_get(attribute=None, storage_id=None): |
1571 | 641 | """Get storage attributes""" | 641 | """Get storage attributes""" |
1572 | 642 | _args = ['storage-get', '--format=json'] | 642 | _args = ['storage-get', '--format=json'] |
1573 | 643 | if storage_id: | 643 | if storage_id: |
1574 | @@ -651,7 +651,7 @@ | |||
1575 | 651 | 651 | ||
1576 | 652 | 652 | ||
1577 | 653 | @cached | 653 | @cached |
1579 | 654 | def storage_list(storage_name=""): | 654 | def storage_list(storage_name=None): |
1580 | 655 | """List the storage IDs for the unit""" | 655 | """List the storage IDs for the unit""" |
1581 | 656 | _args = ['storage-list', '--format=json'] | 656 | _args = ['storage-list', '--format=json'] |
1582 | 657 | if storage_name: | 657 | if storage_name: |
1583 | @@ -878,6 +878,40 @@ | |||
1584 | 878 | subprocess.check_call(cmd) | 878 | subprocess.check_call(cmd) |
1585 | 879 | 879 | ||
1586 | 880 | 880 | ||
1587 | 881 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1588 | 882 | def payload_register(ptype, klass, pid): | ||
1589 | 883 | """ is used while a hook is running to let Juju know that a | ||
1590 | 884 | payload has been started.""" | ||
1591 | 885 | cmd = ['payload-register'] | ||
1592 | 886 | for x in [ptype, klass, pid]: | ||
1593 | 887 | cmd.append(x) | ||
1594 | 888 | subprocess.check_call(cmd) | ||
1595 | 889 | |||
1596 | 890 | |||
1597 | 891 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1598 | 892 | def payload_unregister(klass, pid): | ||
1599 | 893 | """ is used while a hook is running to let Juju know | ||
1600 | 894 | that a payload has been manually stopped. The <class> and <id> provided | ||
1601 | 895 | must match a payload that has been previously registered with juju using | ||
1602 | 896 | payload-register.""" | ||
1603 | 897 | cmd = ['payload-unregister'] | ||
1604 | 898 | for x in [klass, pid]: | ||
1605 | 899 | cmd.append(x) | ||
1606 | 900 | subprocess.check_call(cmd) | ||
1607 | 901 | |||
1608 | 902 | |||
1609 | 903 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1610 | 904 | def payload_status_set(klass, pid, status): | ||
1611 | 905 | """is used to update the current status of a registered payload. | ||
1612 | 906 | The <class> and <id> provided must match a payload that has been previously | ||
1613 | 907 | registered with juju using payload-register. The <status> must be one of the | ||
1614 | 908 | follow: starting, started, stopping, stopped""" | ||
1615 | 909 | cmd = ['payload-status-set'] | ||
1616 | 910 | for x in [klass, pid, status]: | ||
1617 | 911 | cmd.append(x) | ||
1618 | 912 | subprocess.check_call(cmd) | ||
1619 | 913 | |||
1620 | 914 | |||
1621 | 881 | @cached | 915 | @cached |
1622 | 882 | def juju_version(): | 916 | def juju_version(): |
1623 | 883 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" | 917 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
1624 | 884 | 918 | ||
1625 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1626 | --- hooks/charmhelpers/core/host.py 2015-11-03 12:29:06 +0000 | |||
1627 | +++ hooks/charmhelpers/core/host.py 2016-02-12 19:36:45 +0000 | |||
1628 | @@ -67,10 +67,14 @@ | |||
1629 | 67 | """Pause a system service. | 67 | """Pause a system service. |
1630 | 68 | 68 | ||
1631 | 69 | Stop it, and prevent it from starting again at boot.""" | 69 | Stop it, and prevent it from starting again at boot.""" |
1633 | 70 | stopped = service_stop(service_name) | 70 | stopped = True |
1634 | 71 | if service_running(service_name): | ||
1635 | 72 | stopped = service_stop(service_name) | ||
1636 | 71 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | 73 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
1637 | 72 | sysv_file = os.path.join(initd_dir, service_name) | 74 | sysv_file = os.path.join(initd_dir, service_name) |
1639 | 73 | if os.path.exists(upstart_file): | 75 | if init_is_systemd(): |
1640 | 76 | service('disable', service_name) | ||
1641 | 77 | elif os.path.exists(upstart_file): | ||
1642 | 74 | override_path = os.path.join( | 78 | override_path = os.path.join( |
1643 | 75 | init_dir, '{}.override'.format(service_name)) | 79 | init_dir, '{}.override'.format(service_name)) |
1644 | 76 | with open(override_path, 'w') as fh: | 80 | with open(override_path, 'w') as fh: |
1645 | @@ -78,9 +82,9 @@ | |||
1646 | 78 | elif os.path.exists(sysv_file): | 82 | elif os.path.exists(sysv_file): |
1647 | 79 | subprocess.check_call(["update-rc.d", service_name, "disable"]) | 83 | subprocess.check_call(["update-rc.d", service_name, "disable"]) |
1648 | 80 | else: | 84 | else: |
1649 | 81 | # XXX: Support SystemD too | ||
1650 | 82 | raise ValueError( | 85 | raise ValueError( |
1652 | 83 | "Unable to detect {0} as either Upstart {1} or SysV {2}".format( | 86 | "Unable to detect {0} as SystemD, Upstart {1} or" |
1653 | 87 | " SysV {2}".format( | ||
1654 | 84 | service_name, upstart_file, sysv_file)) | 88 | service_name, upstart_file, sysv_file)) |
1655 | 85 | return stopped | 89 | return stopped |
1656 | 86 | 90 | ||
1657 | @@ -92,7 +96,9 @@ | |||
1658 | 92 | Reenable starting again at boot. Start the service""" | 96 | Reenable starting again at boot. Start the service""" |
1659 | 93 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | 97 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
1660 | 94 | sysv_file = os.path.join(initd_dir, service_name) | 98 | sysv_file = os.path.join(initd_dir, service_name) |
1662 | 95 | if os.path.exists(upstart_file): | 99 | if init_is_systemd(): |
1663 | 100 | service('enable', service_name) | ||
1664 | 101 | elif os.path.exists(upstart_file): | ||
1665 | 96 | override_path = os.path.join( | 102 | override_path = os.path.join( |
1666 | 97 | init_dir, '{}.override'.format(service_name)) | 103 | init_dir, '{}.override'.format(service_name)) |
1667 | 98 | if os.path.exists(override_path): | 104 | if os.path.exists(override_path): |
1668 | @@ -100,34 +106,43 @@ | |||
1669 | 100 | elif os.path.exists(sysv_file): | 106 | elif os.path.exists(sysv_file): |
1670 | 101 | subprocess.check_call(["update-rc.d", service_name, "enable"]) | 107 | subprocess.check_call(["update-rc.d", service_name, "enable"]) |
1671 | 102 | else: | 108 | else: |
1672 | 103 | # XXX: Support SystemD too | ||
1673 | 104 | raise ValueError( | 109 | raise ValueError( |
1675 | 105 | "Unable to detect {0} as either Upstart {1} or SysV {2}".format( | 110 | "Unable to detect {0} as SystemD, Upstart {1} or" |
1676 | 111 | " SysV {2}".format( | ||
1677 | 106 | service_name, upstart_file, sysv_file)) | 112 | service_name, upstart_file, sysv_file)) |
1678 | 107 | 113 | ||
1680 | 108 | started = service_start(service_name) | 114 | started = service_running(service_name) |
1681 | 115 | if not started: | ||
1682 | 116 | started = service_start(service_name) | ||
1683 | 109 | return started | 117 | return started |
1684 | 110 | 118 | ||
1685 | 111 | 119 | ||
1686 | 112 | def service(action, service_name): | 120 | def service(action, service_name): |
1687 | 113 | """Control a system service""" | 121 | """Control a system service""" |
1689 | 114 | cmd = ['service', service_name, action] | 122 | if init_is_systemd(): |
1690 | 123 | cmd = ['systemctl', action, service_name] | ||
1691 | 124 | else: | ||
1692 | 125 | cmd = ['service', service_name, action] | ||
1693 | 115 | return subprocess.call(cmd) == 0 | 126 | return subprocess.call(cmd) == 0 |
1694 | 116 | 127 | ||
1695 | 117 | 128 | ||
1697 | 118 | def service_running(service): | 129 | def service_running(service_name): |
1698 | 119 | """Determine whether a system service is running""" | 130 | """Determine whether a system service is running""" |
1705 | 120 | try: | 131 | if init_is_systemd(): |
1706 | 121 | output = subprocess.check_output( | 132 | return service('is-active', service_name) |
1701 | 122 | ['service', service, 'status'], | ||
1702 | 123 | stderr=subprocess.STDOUT).decode('UTF-8') | ||
1703 | 124 | except subprocess.CalledProcessError: | ||
1704 | 125 | return False | ||
1707 | 126 | else: | 133 | else: |
1711 | 127 | if ("start/running" in output or "is running" in output): | 134 | try: |
1712 | 128 | return True | 135 | output = subprocess.check_output( |
1713 | 129 | else: | 136 | ['service', service_name, 'status'], |
1714 | 137 | stderr=subprocess.STDOUT).decode('UTF-8') | ||
1715 | 138 | except subprocess.CalledProcessError: | ||
1716 | 130 | return False | 139 | return False |
1717 | 140 | else: | ||
1718 | 141 | if ("start/running" in output or "is running" in output or | ||
1719 | 142 | "up and running" in output): | ||
1720 | 143 | return True | ||
1721 | 144 | else: | ||
1722 | 145 | return False | ||
1723 | 131 | 146 | ||
1724 | 132 | 147 | ||
1725 | 133 | def service_available(service_name): | 148 | def service_available(service_name): |
1726 | @@ -142,8 +157,29 @@ | |||
1727 | 142 | return True | 157 | return True |
1728 | 143 | 158 | ||
1729 | 144 | 159 | ||
1732 | 145 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 160 | SYSTEMD_SYSTEM = '/run/systemd/system' |
1733 | 146 | """Add a user to the system""" | 161 | |
1734 | 162 | |||
1735 | 163 | def init_is_systemd(): | ||
1736 | 164 | """Return True if the host system uses systemd, False otherwise.""" | ||
1737 | 165 | return os.path.isdir(SYSTEMD_SYSTEM) | ||
1738 | 166 | |||
1739 | 167 | |||
1740 | 168 | def adduser(username, password=None, shell='/bin/bash', system_user=False, | ||
1741 | 169 | primary_group=None, secondary_groups=None): | ||
1742 | 170 | """Add a user to the system. | ||
1743 | 171 | |||
1744 | 172 | Will log but otherwise succeed if the user already exists. | ||
1745 | 173 | |||
1746 | 174 | :param str username: Username to create | ||
1747 | 175 | :param str password: Password for user; if ``None``, create a system user | ||
1748 | 176 | :param str shell: The default shell for the user | ||
1749 | 177 | :param bool system_user: Whether to create a login or system user | ||
1750 | 178 | :param str primary_group: Primary group for user; defaults to username | ||
1751 | 179 | :param list secondary_groups: Optional list of additional groups | ||
1752 | 180 | |||
1753 | 181 | :returns: The password database entry struct, as returned by `pwd.getpwnam` | ||
1754 | 182 | """ | ||
1755 | 147 | try: | 183 | try: |
1756 | 148 | user_info = pwd.getpwnam(username) | 184 | user_info = pwd.getpwnam(username) |
1757 | 149 | log('user {0} already exists!'.format(username)) | 185 | log('user {0} already exists!'.format(username)) |
1758 | @@ -158,6 +194,16 @@ | |||
1759 | 158 | '--shell', shell, | 194 | '--shell', shell, |
1760 | 159 | '--password', password, | 195 | '--password', password, |
1761 | 160 | ]) | 196 | ]) |
1762 | 197 | if not primary_group: | ||
1763 | 198 | try: | ||
1764 | 199 | grp.getgrnam(username) | ||
1765 | 200 | primary_group = username # avoid "group exists" error | ||
1766 | 201 | except KeyError: | ||
1767 | 202 | pass | ||
1768 | 203 | if primary_group: | ||
1769 | 204 | cmd.extend(['-g', primary_group]) | ||
1770 | 205 | if secondary_groups: | ||
1771 | 206 | cmd.extend(['-G', ','.join(secondary_groups)]) | ||
1772 | 161 | cmd.append(username) | 207 | cmd.append(username) |
1773 | 162 | subprocess.check_call(cmd) | 208 | subprocess.check_call(cmd) |
1774 | 163 | user_info = pwd.getpwnam(username) | 209 | user_info = pwd.getpwnam(username) |
1775 | @@ -255,14 +301,12 @@ | |||
1776 | 255 | 301 | ||
1777 | 256 | 302 | ||
1778 | 257 | def fstab_remove(mp): | 303 | def fstab_remove(mp): |
1781 | 258 | """Remove the given mountpoint entry from /etc/fstab | 304 | """Remove the given mountpoint entry from /etc/fstab""" |
1780 | 259 | """ | ||
1782 | 260 | return Fstab.remove_by_mountpoint(mp) | 305 | return Fstab.remove_by_mountpoint(mp) |
1783 | 261 | 306 | ||
1784 | 262 | 307 | ||
1785 | 263 | def fstab_add(dev, mp, fs, options=None): | 308 | def fstab_add(dev, mp, fs, options=None): |
1788 | 264 | """Adds the given device entry to the /etc/fstab file | 309 | """Adds the given device entry to the /etc/fstab file""" |
1787 | 265 | """ | ||
1789 | 266 | return Fstab.add(dev, mp, fs, options=options) | 310 | return Fstab.add(dev, mp, fs, options=options) |
1790 | 267 | 311 | ||
1791 | 268 | 312 | ||
1792 | @@ -318,8 +362,7 @@ | |||
1793 | 318 | 362 | ||
1794 | 319 | 363 | ||
1795 | 320 | def file_hash(path, hash_type='md5'): | 364 | def file_hash(path, hash_type='md5'): |
1798 | 321 | """ | 365 | """Generate a hash checksum of the contents of 'path' or None if not found. |
1797 | 322 | Generate a hash checksum of the contents of 'path' or None if not found. | ||
1799 | 323 | 366 | ||
1800 | 324 | :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, | 367 | :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, |
1801 | 325 | such as md5, sha1, sha256, sha512, etc. | 368 | such as md5, sha1, sha256, sha512, etc. |
1802 | @@ -334,10 +377,9 @@ | |||
1803 | 334 | 377 | ||
1804 | 335 | 378 | ||
1805 | 336 | def path_hash(path): | 379 | def path_hash(path): |
1810 | 337 | """ | 380 | """Generate a hash checksum of all files matching 'path'. Standard |
1811 | 338 | Generate a hash checksum of all files matching 'path'. Standard wildcards | 381 | wildcards like '*' and '?' are supported, see documentation for the 'glob' |
1812 | 339 | like '*' and '?' are supported, see documentation for the 'glob' module for | 382 | module for more information. |
1809 | 340 | more information. | ||
1813 | 341 | 383 | ||
1814 | 342 | :return: dict: A { filename: hash } dictionary for all matched files. | 384 | :return: dict: A { filename: hash } dictionary for all matched files. |
1815 | 343 | Empty if none found. | 385 | Empty if none found. |
1816 | @@ -349,8 +391,7 @@ | |||
1817 | 349 | 391 | ||
1818 | 350 | 392 | ||
1819 | 351 | def check_hash(path, checksum, hash_type='md5'): | 393 | def check_hash(path, checksum, hash_type='md5'): |
1822 | 352 | """ | 394 | """Validate a file using a cryptographic checksum. |
1821 | 353 | Validate a file using a cryptographic checksum. | ||
1823 | 354 | 395 | ||
1824 | 355 | :param str checksum: Value of the checksum used to validate the file. | 396 | :param str checksum: Value of the checksum used to validate the file. |
1825 | 356 | :param str hash_type: Hash algorithm used to generate `checksum`. | 397 | :param str hash_type: Hash algorithm used to generate `checksum`. |
1826 | @@ -365,6 +406,7 @@ | |||
1827 | 365 | 406 | ||
1828 | 366 | 407 | ||
1829 | 367 | class ChecksumError(ValueError): | 408 | class ChecksumError(ValueError): |
1830 | 409 | """A class derived from Value error to indicate the checksum failed.""" | ||
1831 | 368 | pass | 410 | pass |
1832 | 369 | 411 | ||
1833 | 370 | 412 | ||
1834 | @@ -470,7 +512,7 @@ | |||
1835 | 470 | 512 | ||
1836 | 471 | 513 | ||
1837 | 472 | def list_nics(nic_type=None): | 514 | def list_nics(nic_type=None): |
1839 | 473 | '''Return a list of nics of given type(s)''' | 515 | """Return a list of nics of given type(s)""" |
1840 | 474 | if isinstance(nic_type, six.string_types): | 516 | if isinstance(nic_type, six.string_types): |
1841 | 475 | int_types = [nic_type] | 517 | int_types = [nic_type] |
1842 | 476 | else: | 518 | else: |
1843 | @@ -512,12 +554,13 @@ | |||
1844 | 512 | 554 | ||
1845 | 513 | 555 | ||
1846 | 514 | def set_nic_mtu(nic, mtu): | 556 | def set_nic_mtu(nic, mtu): |
1848 | 515 | '''Set MTU on a network interface''' | 557 | """Set the Maximum Transmission Unit (MTU) on a network interface.""" |
1849 | 516 | cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] | 558 | cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] |
1850 | 517 | subprocess.check_call(cmd) | 559 | subprocess.check_call(cmd) |
1851 | 518 | 560 | ||
1852 | 519 | 561 | ||
1853 | 520 | def get_nic_mtu(nic): | 562 | def get_nic_mtu(nic): |
1854 | 563 | """Return the Maximum Transmission Unit (MTU) for a network interface.""" | ||
1855 | 521 | cmd = ['ip', 'addr', 'show', nic] | 564 | cmd = ['ip', 'addr', 'show', nic] |
1856 | 522 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') | 565 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') |
1857 | 523 | mtu = "" | 566 | mtu = "" |
1858 | @@ -529,6 +572,7 @@ | |||
1859 | 529 | 572 | ||
1860 | 530 | 573 | ||
1861 | 531 | def get_nic_hwaddr(nic): | 574 | def get_nic_hwaddr(nic): |
1862 | 575 | """Return the Media Access Control (MAC) for a network interface.""" | ||
1863 | 532 | cmd = ['ip', '-o', '-0', 'addr', 'show', nic] | 576 | cmd = ['ip', '-o', '-0', 'addr', 'show', nic] |
1864 | 533 | ip_output = subprocess.check_output(cmd).decode('UTF-8') | 577 | ip_output = subprocess.check_output(cmd).decode('UTF-8') |
1865 | 534 | hwaddr = "" | 578 | hwaddr = "" |
1866 | @@ -539,7 +583,7 @@ | |||
1867 | 539 | 583 | ||
1868 | 540 | 584 | ||
1869 | 541 | def cmp_pkgrevno(package, revno, pkgcache=None): | 585 | def cmp_pkgrevno(package, revno, pkgcache=None): |
1871 | 542 | '''Compare supplied revno with the revno of the installed package | 586 | """Compare supplied revno with the revno of the installed package |
1872 | 543 | 587 | ||
1873 | 544 | * 1 => Installed revno is greater than supplied arg | 588 | * 1 => Installed revno is greater than supplied arg |
1874 | 545 | * 0 => Installed revno is the same as supplied arg | 589 | * 0 => Installed revno is the same as supplied arg |
1875 | @@ -548,7 +592,7 @@ | |||
1876 | 548 | This function imports apt_cache function from charmhelpers.fetch if | 592 | This function imports apt_cache function from charmhelpers.fetch if |
1877 | 549 | the pkgcache argument is None. Be sure to add charmhelpers.fetch if | 593 | the pkgcache argument is None. Be sure to add charmhelpers.fetch if |
1878 | 550 | you call this function, or pass an apt_pkg.Cache() instance. | 594 | you call this function, or pass an apt_pkg.Cache() instance. |
1880 | 551 | ''' | 595 | """ |
1881 | 552 | import apt_pkg | 596 | import apt_pkg |
1882 | 553 | if not pkgcache: | 597 | if not pkgcache: |
1883 | 554 | from charmhelpers.fetch import apt_cache | 598 | from charmhelpers.fetch import apt_cache |
1884 | @@ -558,19 +602,27 @@ | |||
1885 | 558 | 602 | ||
1886 | 559 | 603 | ||
1887 | 560 | @contextmanager | 604 | @contextmanager |
1889 | 561 | def chdir(d): | 605 | def chdir(directory): |
1890 | 606 | """Change the current working directory to a different directory for a code | ||
1891 | 607 | block and return the previous directory after the block exits. Useful to | ||
1892 | 608 | run commands from a specificed directory. | ||
1893 | 609 | |||
1894 | 610 | :param str directory: The directory path to change to for this context. | ||
1895 | 611 | """ | ||
1896 | 562 | cur = os.getcwd() | 612 | cur = os.getcwd() |
1897 | 563 | try: | 613 | try: |
1899 | 564 | yield os.chdir(d) | 614 | yield os.chdir(directory) |
1900 | 565 | finally: | 615 | finally: |
1901 | 566 | os.chdir(cur) | 616 | os.chdir(cur) |
1902 | 567 | 617 | ||
1903 | 568 | 618 | ||
1904 | 569 | def chownr(path, owner, group, follow_links=True, chowntopdir=False): | 619 | def chownr(path, owner, group, follow_links=True, chowntopdir=False): |
1907 | 570 | """ | 620 | """Recursively change user and group ownership of files and directories |
1906 | 571 | Recursively change user and group ownership of files and directories | ||
1908 | 572 | in given path. Doesn't chown path itself by default, only its children. | 621 | in given path. Doesn't chown path itself by default, only its children. |
1909 | 573 | 622 | ||
1910 | 623 | :param str path: The string path to start changing ownership. | ||
1911 | 624 | :param str owner: The owner string to use when looking up the uid. | ||
1912 | 625 | :param str group: The group string to use when looking up the gid. | ||
1913 | 574 | :param bool follow_links: Also Chown links if True | 626 | :param bool follow_links: Also Chown links if True |
1914 | 575 | :param bool chowntopdir: Also chown path itself if True | 627 | :param bool chowntopdir: Also chown path itself if True |
1915 | 576 | """ | 628 | """ |
1916 | @@ -594,15 +646,23 @@ | |||
1917 | 594 | 646 | ||
1918 | 595 | 647 | ||
1919 | 596 | def lchownr(path, owner, group): | 648 | def lchownr(path, owner, group): |
1920 | 649 | """Recursively change user and group ownership of files and directories | ||
1921 | 650 | in a given path, not following symbolic links. See the documentation for | ||
1922 | 651 | 'os.lchown' for more information. | ||
1923 | 652 | |||
1924 | 653 | :param str path: The string path to start changing ownership. | ||
1925 | 654 | :param str owner: The owner string to use when looking up the uid. | ||
1926 | 655 | :param str group: The group string to use when looking up the gid. | ||
1927 | 656 | """ | ||
1928 | 597 | chownr(path, owner, group, follow_links=False) | 657 | chownr(path, owner, group, follow_links=False) |
1929 | 598 | 658 | ||
1930 | 599 | 659 | ||
1931 | 600 | def get_total_ram(): | 660 | def get_total_ram(): |
1933 | 601 | '''The total amount of system RAM in bytes. | 661 | """The total amount of system RAM in bytes. |
1934 | 602 | 662 | ||
1935 | 603 | This is what is reported by the OS, and may be overcommitted when | 663 | This is what is reported by the OS, and may be overcommitted when |
1936 | 604 | there are multiple containers hosted on the same machine. | 664 | there are multiple containers hosted on the same machine. |
1938 | 605 | ''' | 665 | """ |
1939 | 606 | with open('/proc/meminfo', 'r') as f: | 666 | with open('/proc/meminfo', 'r') as f: |
1940 | 607 | for line in f.readlines(): | 667 | for line in f.readlines(): |
1941 | 608 | if line: | 668 | if line: |
1942 | 609 | 669 | ||
1943 | === modified file 'hooks/charmhelpers/core/services/helpers.py' | |||
1944 | --- hooks/charmhelpers/core/services/helpers.py 2015-11-03 12:29:06 +0000 | |||
1945 | +++ hooks/charmhelpers/core/services/helpers.py 2016-02-12 19:36:45 +0000 | |||
1946 | @@ -243,13 +243,15 @@ | |||
1947 | 243 | :param str source: The template source file, relative to | 243 | :param str source: The template source file, relative to |
1948 | 244 | `$CHARM_DIR/templates` | 244 | `$CHARM_DIR/templates` |
1949 | 245 | 245 | ||
1951 | 246 | :param str target: The target to write the rendered template to | 246 | :param str target: The target to write the rendered template to (or None) |
1952 | 247 | :param str owner: The owner of the rendered file | 247 | :param str owner: The owner of the rendered file |
1953 | 248 | :param str group: The group of the rendered file | 248 | :param str group: The group of the rendered file |
1954 | 249 | :param int perms: The permissions of the rendered file | 249 | :param int perms: The permissions of the rendered file |
1955 | 250 | :param partial on_change_action: functools partial to be executed when | 250 | :param partial on_change_action: functools partial to be executed when |
1956 | 251 | rendered file changes | 251 | rendered file changes |
1957 | 252 | :param jinja2 loader template_loader: A jinja2 template loader | 252 | :param jinja2 loader template_loader: A jinja2 template loader |
1958 | 253 | |||
1959 | 254 | :return str: The rendered template | ||
1960 | 253 | """ | 255 | """ |
1961 | 254 | def __init__(self, source, target, | 256 | def __init__(self, source, target, |
1962 | 255 | owner='root', group='root', perms=0o444, | 257 | owner='root', group='root', perms=0o444, |
1963 | @@ -267,12 +269,14 @@ | |||
1964 | 267 | if self.on_change_action and os.path.isfile(self.target): | 269 | if self.on_change_action and os.path.isfile(self.target): |
1965 | 268 | pre_checksum = host.file_hash(self.target) | 270 | pre_checksum = host.file_hash(self.target) |
1966 | 269 | service = manager.get_service(service_name) | 271 | service = manager.get_service(service_name) |
1968 | 270 | context = {} | 272 | context = {'ctx': {}} |
1969 | 271 | for ctx in service.get('required_data', []): | 273 | for ctx in service.get('required_data', []): |
1970 | 272 | context.update(ctx) | 274 | context.update(ctx) |
1974 | 273 | templating.render(self.source, self.target, context, | 275 | context['ctx'].update(ctx) |
1975 | 274 | self.owner, self.group, self.perms, | 276 | |
1976 | 275 | template_loader=self.template_loader) | 277 | result = templating.render(self.source, self.target, context, |
1977 | 278 | self.owner, self.group, self.perms, | ||
1978 | 279 | template_loader=self.template_loader) | ||
1979 | 276 | if self.on_change_action: | 280 | if self.on_change_action: |
1980 | 277 | if pre_checksum == host.file_hash(self.target): | 281 | if pre_checksum == host.file_hash(self.target): |
1981 | 278 | hookenv.log( | 282 | hookenv.log( |
1982 | @@ -281,6 +285,8 @@ | |||
1983 | 281 | else: | 285 | else: |
1984 | 282 | self.on_change_action() | 286 | self.on_change_action() |
1985 | 283 | 287 | ||
1986 | 288 | return result | ||
1987 | 289 | |||
1988 | 284 | 290 | ||
1989 | 285 | # Convenience aliases for templates | 291 | # Convenience aliases for templates |
1990 | 286 | render_template = template = TemplateCallback | 292 | render_template = template = TemplateCallback |
1991 | 287 | 293 | ||
1992 | === modified file 'hooks/charmhelpers/core/templating.py' | |||
1993 | --- hooks/charmhelpers/core/templating.py 2015-11-03 12:29:06 +0000 | |||
1994 | +++ hooks/charmhelpers/core/templating.py 2016-02-12 19:36:45 +0000 | |||
1995 | @@ -27,7 +27,8 @@ | |||
1996 | 27 | 27 | ||
1997 | 28 | The `source` path, if not absolute, is relative to the `templates_dir`. | 28 | The `source` path, if not absolute, is relative to the `templates_dir`. |
1998 | 29 | 29 | ||
2000 | 30 | The `target` path should be absolute. | 30 | The `target` path should be absolute. It can also be `None`, in which |
2001 | 31 | case no file will be written. | ||
2002 | 31 | 32 | ||
2003 | 32 | The context should be a dict containing the values to be replaced in the | 33 | The context should be a dict containing the values to be replaced in the |
2004 | 33 | template. | 34 | template. |
2005 | @@ -36,6 +37,9 @@ | |||
2006 | 36 | 37 | ||
2007 | 37 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | 38 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. |
2008 | 38 | 39 | ||
2009 | 40 | The rendered template will be written to the file as well as being returned | ||
2010 | 41 | as a string. | ||
2011 | 42 | |||
2012 | 39 | Note: Using this requires python-jinja2; if it is not installed, calling | 43 | Note: Using this requires python-jinja2; if it is not installed, calling |
2013 | 40 | this will attempt to use charmhelpers.fetch.apt_install to install it. | 44 | this will attempt to use charmhelpers.fetch.apt_install to install it. |
2014 | 41 | """ | 45 | """ |
2015 | @@ -67,9 +71,11 @@ | |||
2016 | 67 | level=hookenv.ERROR) | 71 | level=hookenv.ERROR) |
2017 | 68 | raise e | 72 | raise e |
2018 | 69 | content = template.render(context) | 73 | content = template.render(context) |
2025 | 70 | target_dir = os.path.dirname(target) | 74 | if target is not None: |
2026 | 71 | if not os.path.exists(target_dir): | 75 | target_dir = os.path.dirname(target) |
2027 | 72 | # This is a terrible default directory permission, as the file | 76 | if not os.path.exists(target_dir): |
2028 | 73 | # or its siblings will often contain secrets. | 77 | # This is a terrible default directory permission, as the file |
2029 | 74 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) | 78 | # or its siblings will often contain secrets. |
2030 | 75 | host.write_file(target, content.encode(encoding), owner, group, perms) | 79 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) |
2031 | 80 | host.write_file(target, content.encode(encoding), owner, group, perms) | ||
2032 | 81 | return content | ||
2033 | 76 | 82 | ||
2034 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
2035 | --- hooks/charmhelpers/fetch/__init__.py 2015-11-03 12:29:06 +0000 | |||
2036 | +++ hooks/charmhelpers/fetch/__init__.py 2016-02-12 19:36:45 +0000 | |||
2037 | @@ -98,6 +98,14 @@ | |||
2038 | 98 | 'liberty/proposed': 'trusty-proposed/liberty', | 98 | 'liberty/proposed': 'trusty-proposed/liberty', |
2039 | 99 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', | 99 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
2040 | 100 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | 100 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
2041 | 101 | # Mitaka | ||
2042 | 102 | 'mitaka': 'trusty-updates/mitaka', | ||
2043 | 103 | 'trusty-mitaka': 'trusty-updates/mitaka', | ||
2044 | 104 | 'trusty-mitaka/updates': 'trusty-updates/mitaka', | ||
2045 | 105 | 'trusty-updates/mitaka': 'trusty-updates/mitaka', | ||
2046 | 106 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
2047 | 107 | 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', | ||
2048 | 108 | 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', | ||
2049 | 101 | } | 109 | } |
2050 | 102 | 110 | ||
2051 | 103 | # The order of this list is very important. Handlers should be listed in from | 111 | # The order of this list is very important. Handlers should be listed in from |
2052 | @@ -411,7 +419,7 @@ | |||
2053 | 411 | importlib.import_module(package), | 419 | importlib.import_module(package), |
2054 | 412 | classname) | 420 | classname) |
2055 | 413 | plugin_list.append(handler_class()) | 421 | plugin_list.append(handler_class()) |
2057 | 414 | except (ImportError, AttributeError): | 422 | except NotImplementedError: |
2058 | 415 | # Skip missing plugins so that they can be ommitted from | 423 | # Skip missing plugins so that they can be ommitted from |
2059 | 416 | # installation if desired | 424 | # installation if desired |
2060 | 417 | log("FetchHandler {} not found, skipping plugin".format( | 425 | log("FetchHandler {} not found, skipping plugin".format( |
2061 | 418 | 426 | ||
2062 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
2063 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-11-03 12:29:06 +0000 | |||
2064 | +++ hooks/charmhelpers/fetch/archiveurl.py 2016-02-12 19:36:45 +0000 | |||
2065 | @@ -108,7 +108,7 @@ | |||
2066 | 108 | install_opener(opener) | 108 | install_opener(opener) |
2067 | 109 | response = urlopen(source) | 109 | response = urlopen(source) |
2068 | 110 | try: | 110 | try: |
2070 | 111 | with open(dest, 'w') as dest_file: | 111 | with open(dest, 'wb') as dest_file: |
2071 | 112 | dest_file.write(response.read()) | 112 | dest_file.write(response.read()) |
2072 | 113 | except Exception as e: | 113 | except Exception as e: |
2073 | 114 | if os.path.isfile(dest): | 114 | if os.path.isfile(dest): |
2074 | 115 | 115 | ||
2075 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
2076 | --- hooks/charmhelpers/fetch/bzrurl.py 2015-06-24 12:22:08 +0000 | |||
2077 | +++ hooks/charmhelpers/fetch/bzrurl.py 2016-02-12 19:36:45 +0000 | |||
2078 | @@ -15,60 +15,50 @@ | |||
2079 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2080 | 16 | 16 | ||
2081 | 17 | import os | 17 | import os |
2082 | 18 | from subprocess import check_call | ||
2083 | 18 | from charmhelpers.fetch import ( | 19 | from charmhelpers.fetch import ( |
2084 | 19 | BaseFetchHandler, | 20 | BaseFetchHandler, |
2086 | 20 | UnhandledSource | 21 | UnhandledSource, |
2087 | 22 | filter_installed_packages, | ||
2088 | 23 | apt_install, | ||
2089 | 21 | ) | 24 | ) |
2090 | 22 | from charmhelpers.core.host import mkdir | 25 | from charmhelpers.core.host import mkdir |
2091 | 23 | 26 | ||
2092 | 24 | import six | ||
2093 | 25 | if six.PY3: | ||
2094 | 26 | raise ImportError('bzrlib does not support Python3') | ||
2095 | 27 | 27 | ||
2104 | 28 | try: | 28 | if filter_installed_packages(['bzr']) != []: |
2105 | 29 | from bzrlib.branch import Branch | 29 | apt_install(['bzr']) |
2106 | 30 | from bzrlib import bzrdir, workingtree, errors | 30 | if filter_installed_packages(['bzr']) != []: |
2107 | 31 | except ImportError: | 31 | raise NotImplementedError('Unable to install bzr') |
2100 | 32 | from charmhelpers.fetch import apt_install | ||
2101 | 33 | apt_install("python-bzrlib") | ||
2102 | 34 | from bzrlib.branch import Branch | ||
2103 | 35 | from bzrlib import bzrdir, workingtree, errors | ||
2108 | 36 | 32 | ||
2109 | 37 | 33 | ||
2110 | 38 | class BzrUrlFetchHandler(BaseFetchHandler): | 34 | class BzrUrlFetchHandler(BaseFetchHandler): |
2111 | 39 | """Handler for bazaar branches via generic and lp URLs""" | 35 | """Handler for bazaar branches via generic and lp URLs""" |
2112 | 40 | def can_handle(self, source): | 36 | def can_handle(self, source): |
2113 | 41 | url_parts = self.parse_url(source) | 37 | url_parts = self.parse_url(source) |
2115 | 42 | if url_parts.scheme not in ('bzr+ssh', 'lp'): | 38 | if url_parts.scheme not in ('bzr+ssh', 'lp', ''): |
2116 | 43 | return False | 39 | return False |
2117 | 40 | elif not url_parts.scheme: | ||
2118 | 41 | return os.path.exists(os.path.join(source, '.bzr')) | ||
2119 | 44 | else: | 42 | else: |
2120 | 45 | return True | 43 | return True |
2121 | 46 | 44 | ||
2122 | 47 | def branch(self, source, dest): | 45 | def branch(self, source, dest): |
2123 | 48 | url_parts = self.parse_url(source) | ||
2124 | 49 | # If we use lp:branchname scheme we need to load plugins | ||
2125 | 50 | if not self.can_handle(source): | 46 | if not self.can_handle(source): |
2126 | 51 | raise UnhandledSource("Cannot handle {}".format(source)) | 47 | raise UnhandledSource("Cannot handle {}".format(source)) |
2141 | 52 | if url_parts.scheme == "lp": | 48 | if os.path.exists(dest): |
2142 | 53 | from bzrlib.plugin import load_plugins | 49 | check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) |
2143 | 54 | load_plugins() | 50 | else: |
2144 | 55 | try: | 51 | check_call(['bzr', 'branch', source, dest]) |
2131 | 56 | local_branch = bzrdir.BzrDir.create_branch_convenience(dest) | ||
2132 | 57 | except errors.AlreadyControlDirError: | ||
2133 | 58 | local_branch = Branch.open(dest) | ||
2134 | 59 | try: | ||
2135 | 60 | remote_branch = Branch.open(source) | ||
2136 | 61 | remote_branch.push(local_branch) | ||
2137 | 62 | tree = workingtree.WorkingTree.open(dest) | ||
2138 | 63 | tree.update() | ||
2139 | 64 | except Exception as e: | ||
2140 | 65 | raise e | ||
2145 | 66 | 52 | ||
2147 | 67 | def install(self, source): | 53 | def install(self, source, dest=None): |
2148 | 68 | url_parts = self.parse_url(source) | 54 | url_parts = self.parse_url(source) |
2149 | 69 | branch_name = url_parts.path.strip("/").split("/")[-1] | 55 | branch_name = url_parts.path.strip("/").split("/")[-1] |
2152 | 70 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | 56 | if dest: |
2153 | 71 | branch_name) | 57 | dest_dir = os.path.join(dest, branch_name) |
2154 | 58 | else: | ||
2155 | 59 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | ||
2156 | 60 | branch_name) | ||
2157 | 61 | |||
2158 | 72 | if not os.path.exists(dest_dir): | 62 | if not os.path.exists(dest_dir): |
2159 | 73 | mkdir(dest_dir, perms=0o755) | 63 | mkdir(dest_dir, perms=0o755) |
2160 | 74 | try: | 64 | try: |
2161 | 75 | 65 | ||
2162 | === modified file 'hooks/charmhelpers/fetch/giturl.py' | |||
2163 | --- hooks/charmhelpers/fetch/giturl.py 2015-11-03 12:29:06 +0000 | |||
2164 | +++ hooks/charmhelpers/fetch/giturl.py 2016-02-12 19:36:45 +0000 | |||
2165 | @@ -15,24 +15,18 @@ | |||
2166 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2167 | 16 | 16 | ||
2168 | 17 | import os | 17 | import os |
2169 | 18 | from subprocess import check_call, CalledProcessError | ||
2170 | 18 | from charmhelpers.fetch import ( | 19 | from charmhelpers.fetch import ( |
2171 | 19 | BaseFetchHandler, | 20 | BaseFetchHandler, |
2173 | 20 | UnhandledSource | 21 | UnhandledSource, |
2174 | 22 | filter_installed_packages, | ||
2175 | 23 | apt_install, | ||
2176 | 21 | ) | 24 | ) |
2191 | 22 | from charmhelpers.core.host import mkdir | 25 | |
2192 | 23 | 26 | if filter_installed_packages(['git']) != []: | |
2193 | 24 | import six | 27 | apt_install(['git']) |
2194 | 25 | if six.PY3: | 28 | if filter_installed_packages(['git']) != []: |
2195 | 26 | raise ImportError('GitPython does not support Python 3') | 29 | raise NotImplementedError('Unable to install git') |
2182 | 27 | |||
2183 | 28 | try: | ||
2184 | 29 | from git import Repo | ||
2185 | 30 | except ImportError: | ||
2186 | 31 | from charmhelpers.fetch import apt_install | ||
2187 | 32 | apt_install("python-git") | ||
2188 | 33 | from git import Repo | ||
2189 | 34 | |||
2190 | 35 | from git.exc import GitCommandError # noqa E402 | ||
2196 | 36 | 30 | ||
2197 | 37 | 31 | ||
2198 | 38 | class GitUrlFetchHandler(BaseFetchHandler): | 32 | class GitUrlFetchHandler(BaseFetchHandler): |
2199 | @@ -40,19 +34,24 @@ | |||
2200 | 40 | def can_handle(self, source): | 34 | def can_handle(self, source): |
2201 | 41 | url_parts = self.parse_url(source) | 35 | url_parts = self.parse_url(source) |
2202 | 42 | # TODO (mattyw) no support for ssh git@ yet | 36 | # TODO (mattyw) no support for ssh git@ yet |
2204 | 43 | if url_parts.scheme not in ('http', 'https', 'git'): | 37 | if url_parts.scheme not in ('http', 'https', 'git', ''): |
2205 | 44 | return False | 38 | return False |
2206 | 39 | elif not url_parts.scheme: | ||
2207 | 40 | return os.path.exists(os.path.join(source, '.git')) | ||
2208 | 45 | else: | 41 | else: |
2209 | 46 | return True | 42 | return True |
2210 | 47 | 43 | ||
2212 | 48 | def clone(self, source, dest, branch, depth=None): | 44 | def clone(self, source, dest, branch="master", depth=None): |
2213 | 49 | if not self.can_handle(source): | 45 | if not self.can_handle(source): |
2214 | 50 | raise UnhandledSource("Cannot handle {}".format(source)) | 46 | raise UnhandledSource("Cannot handle {}".format(source)) |
2215 | 51 | 47 | ||
2218 | 52 | if depth: | 48 | if os.path.exists(dest): |
2219 | 53 | Repo.clone_from(source, dest, branch=branch, depth=depth) | 49 | cmd = ['git', '-C', dest, 'pull', source, branch] |
2220 | 54 | else: | 50 | else: |
2222 | 55 | Repo.clone_from(source, dest, branch=branch) | 51 | cmd = ['git', 'clone', source, dest, '--branch', branch] |
2223 | 52 | if depth: | ||
2224 | 53 | cmd.extend(['--depth', depth]) | ||
2225 | 54 | check_call(cmd) | ||
2226 | 56 | 55 | ||
2227 | 57 | def install(self, source, branch="master", dest=None, depth=None): | 56 | def install(self, source, branch="master", dest=None, depth=None): |
2228 | 58 | url_parts = self.parse_url(source) | 57 | url_parts = self.parse_url(source) |
2229 | @@ -62,11 +61,9 @@ | |||
2230 | 62 | else: | 61 | else: |
2231 | 63 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | 62 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
2232 | 64 | branch_name) | 63 | branch_name) |
2233 | 65 | if not os.path.exists(dest_dir): | ||
2234 | 66 | mkdir(dest_dir, perms=0o755) | ||
2235 | 67 | try: | 64 | try: |
2236 | 68 | self.clone(source, dest_dir, branch, depth) | 65 | self.clone(source, dest_dir, branch, depth) |
2238 | 69 | except GitCommandError as e: | 66 | except CalledProcessError as e: |
2239 | 70 | raise UnhandledSource(e) | 67 | raise UnhandledSource(e) |
2240 | 71 | except OSError as e: | 68 | except OSError as e: |
2241 | 72 | raise UnhandledSource(e.strerror) | 69 | raise UnhandledSource(e.strerror) |
2242 | 73 | 70 | ||
2243 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
2244 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-12 11:42:00 +0000 | |||
2245 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-12 19:36:45 +0000 | |||
2246 | @@ -121,11 +121,12 @@ | |||
2247 | 121 | 121 | ||
2248 | 122 | # Charms which should use the source config option | 122 | # Charms which should use the source config option |
2249 | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 123 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
2251 | 124 | 'ceph-osd', 'ceph-radosgw'] | 124 | 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] |
2252 | 125 | 125 | ||
2253 | 126 | # Charms which can not use openstack-origin, ie. many subordinates | 126 | # Charms which can not use openstack-origin, ie. many subordinates |
2254 | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', | 127 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
2256 | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] | 128 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', |
2257 | 129 | 'cinder-backup'] | ||
2258 | 129 | 130 | ||
2259 | 130 | if self.openstack: | 131 | if self.openstack: |
2260 | 131 | for svc in services: | 132 | for svc in services: |
2261 | @@ -225,7 +226,8 @@ | |||
2262 | 225 | self.precise_havana, self.precise_icehouse, | 226 | self.precise_havana, self.precise_icehouse, |
2263 | 226 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 227 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
2264 | 227 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | 228 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
2266 | 228 | self.wily_liberty) = range(12) | 229 | self.wily_liberty, self.trusty_mitaka, |
2267 | 230 | self.xenial_mitaka) = range(14) | ||
2268 | 229 | 231 | ||
2269 | 230 | releases = { | 232 | releases = { |
2270 | 231 | ('precise', None): self.precise_essex, | 233 | ('precise', None): self.precise_essex, |
2271 | @@ -237,9 +239,11 @@ | |||
2272 | 237 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 239 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
2273 | 238 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 240 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
2274 | 239 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | 241 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
2275 | 242 | ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, | ||
2276 | 240 | ('utopic', None): self.utopic_juno, | 243 | ('utopic', None): self.utopic_juno, |
2277 | 241 | ('vivid', None): self.vivid_kilo, | 244 | ('vivid', None): self.vivid_kilo, |
2279 | 242 | ('wily', None): self.wily_liberty} | 245 | ('wily', None): self.wily_liberty, |
2280 | 246 | ('xenial', None): self.xenial_mitaka} | ||
2281 | 243 | return releases[(self.series, self.openstack)] | 247 | return releases[(self.series, self.openstack)] |
2282 | 244 | 248 | ||
2283 | 245 | def _get_openstack_release_string(self): | 249 | def _get_openstack_release_string(self): |
2284 | @@ -256,6 +260,7 @@ | |||
2285 | 256 | ('utopic', 'juno'), | 260 | ('utopic', 'juno'), |
2286 | 257 | ('vivid', 'kilo'), | 261 | ('vivid', 'kilo'), |
2287 | 258 | ('wily', 'liberty'), | 262 | ('wily', 'liberty'), |
2288 | 263 | ('xenial', 'mitaka'), | ||
2289 | 259 | ]) | 264 | ]) |
2290 | 260 | if self.openstack: | 265 | if self.openstack: |
2291 | 261 | os_origin = self.openstack.split(':')[1] | 266 | os_origin = self.openstack.split(':')[1] |
charm_lint_check #384 neutron-api-odl-next for narindergupta mp285933
LINT OK: passed
Build: http://10.245.162.36:8080/job/charm_lint_check/384/