Merge lp:~bigdata-dev/charms/trusty/hdp-hadoop/trunk into lp:charms/trusty/hdp-hadoop

Proposed by Cory Johns
Status: Needs review
Proposed branch: lp:~bigdata-dev/charms/trusty/hdp-hadoop/trunk
Merge into: lp:charms/trusty/hdp-hadoop
Diff against target: 3106 lines (+361/-2350)
18 files modified
README.md (+3/-3)
files/scripts/terasort.sh (+15/-0)
hooks/bdutils.py (+44/-28)
hooks/bootstrap.py (+9/-0)
hooks/charmhelpers/core/fstab.py (+0/-114)
hooks/charmhelpers/core/hookenv.py (+0/-498)
hooks/charmhelpers/core/host.py (+0/-325)
hooks/charmhelpers/fetch/__init__.py (+0/-349)
hooks/charmhelpers/fetch/archiveurl.py (+0/-63)
hooks/charmhelpers/fetch/bzrurl.py (+0/-50)
hooks/charmhelpers/lib/ceph_utils.py (+0/-315)
hooks/charmhelpers/lib/cluster_utils.py (+0/-128)
hooks/charmhelpers/lib/utils.py (+0/-221)
hooks/charmhelpers/setup.py (+0/-12)
hooks/hdp-hadoop-common.py (+199/-110)
hooks/hdputils.py (+14/-12)
resources.yaml (+11/-0)
tests/01-hadoop-cluster-deployment-1.py (+66/-122)
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/hdp-hadoop/trunk
Reviewer          Review Type    Date Requested    Status
amir sanjar       community                        Approve
charmers                                           Pending
Review via email: mp+245602@code.launchpad.net

Description of the change

Updates to support HDP 2.2
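
The most visible packaging change in HDP 2.2 is the hdp-select layout:
components install under versioned /usr/hdp/<version> trees with
/usr/hdp/current/<component> symlinks, which is why the path fixes in this
branch (terasort.sh, the "Fixed mapreduce paths" and "Fixed yarn paths"
revisions) all point at /usr/hdp/current. A minimal sketch of resolving
such a path; the helper name is illustrative, not part of the charm::

    import os

    def hdp_component_path(component, filename):
        # Assumes the HDP 2.2 hdp-select layout, where
        # /usr/hdp/current/<component> is a symlink into the active
        # /usr/hdp/<version>/ tree.
        return os.path.join('/usr/hdp', 'current', component, filename)

    print(hdp_component_path('hadoop-mapreduce-client',
                             'hadoop-mapreduce-examples.jar'))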

Revision history for this message
amir sanjar (asanjar) :
review: Approve

Unmerged revisions

41. By Cory Johns

Fix jps bug (and un-vendor charmhelpers)

40. By amir sanjar

fixes for enabling hdp 2.2 support

39. By Cory Johns

Fixed mapreduce TODO config items and refactored setHadoopConfigXML

38. By Cory Johns

Cleaned up mapreduce test case

37. By Cory Johns

Refactor test case and fix "exit on PASS" bug in test

36. By Cory Johns

Default memory settings

35. By Cory Johns

Fixed hostname yarn property and cleaned up test case

34. By Cory Johns

Fixed mapreduce paths

33. By Cory Johns

Fixed yarn paths

32. By Cory Johns

Fixed TODO-JDK-PATH placeholder from upstream config

Preview Diff

1=== modified file 'README.md'
2--- README.md 2014-09-10 18:29:03 +0000
3+++ README.md 2015-02-13 00:38:49 +0000
4@@ -1,5 +1,5 @@
5 ## Overview
6-**What is Hortonworks Apache Hadoop (HDP 2.1.3) ?**
7+**What is Hortonworks Apache Hadoop (HDP 2.2.0) ?**
8 The Apache Hadoop software library is a framework that allows for the
9 distributed processing of large data sets across clusters of computers
10 using a simple programming model.
11@@ -10,7 +10,7 @@
12 at the application layer, so delivering a highly-availabile service on top of a
13 cluster of computers, each of which may be prone to failures.
14
15-Apache Hadoop 2.4.1 consists of significant improvements over the previous
16+Apache Hadoop 2.6.x consists of significant improvements over the previous
17 stable release (hadoop-1.x).
18
19 Here is a short overview of the improvments to both HDFS and MapReduce.
20@@ -51,7 +51,7 @@
21
22 This supports deployments of Hadoop in a number of configurations.
23
24-### HDP 2.1.3 Usage #1: Combined HDFS and MapReduce
25+### HDP 2.2.0 Usage #1: Combined HDFS and MapReduce
26
27 In this configuration, the YARN ResourceManager is deployed on the same
28 service units as HDFS namenode and the HDFS datanodes also run YARN NodeManager::
29
30=== added file 'files/jujuresources-0.2.3.tar.gz'
31Binary files files/jujuresources-0.2.3.tar.gz 1970-01-01 00:00:00 +0000 and files/jujuresources-0.2.3.tar.gz 2015-02-13 00:38:49 +0000 differ
32=== added file 'files/scripts/terasort.sh'
33--- files/scripts/terasort.sh 1970-01-01 00:00:00 +0000
34+++ files/scripts/terasort.sh 2015-02-13 00:38:49 +0000
35@@ -0,0 +1,15 @@
36+#!/bin/bash
37+
38+
39+SIZE=10000
40+NUM_MAPS=100
41+NUM_REDUCES=100
42+IN_DIR=in_dir
43+OUT_DIR=out_dir
44+hadoop fs -rm -r -skipTrash ${IN_DIR} || true
45+hadoop jar /usr/hdp/current/hadoop-mapreduce-client/hadoop-mapreduce-examples.jar teragen ${SIZE} ${IN_DIR}
46+
47+sleep 20
48+
49+hadoop jar /usr/hdp/current/hadoop-mapreduce-client/hadoop-mapreduce-examples.jar terasort ${IN_DIR} ${OUT_DIR}
50+
51
52=== modified file 'hooks/bdutils.py'
53--- hooks/bdutils.py 2014-09-17 14:49:16 +0000
54+++ hooks/bdutils.py 2015-02-13 00:38:49 +0000
55@@ -1,11 +1,15 @@
56 #!/usr/bin/python
57 import os
58 import pwd
59+import sys
60 import grp
61 import subprocess
62+import hashlib
63+import shlex
64
65 from shutil import rmtree
66 from charmhelpers.core.hookenv import log
67+from charmhelpers.contrib.bigdata.utils import jps
68
69 def createPropertyElement(name, value):
70 import xml.etree.ElementTree as ET
71@@ -22,26 +26,21 @@
72 import xml.dom.minidom as minidom
73 print("==> setHadoopConfigXML ","INFO")
74 import xml.etree.ElementTree as ET
75- found = False
76 with open(xmlfileNamePath,'rb+') as f:
77 root = ET.parse(f).getroot()
78- proList = root.findall("property")
79- for p in proList:
80- if found:
81+ for p in root.findall("property"):
82+ if p.find('name').text == name:
83+ if value is None:
84+ root.remove(p)
85+ else:
86+ p.find('value').text = value
87 break
88- cList = p.getchildren()
89- for c in cList:
90- if c.text == name:
91- p.find("value").text = value
92- found = True
93- break
94+ else:
95+ if value is not None:
96+ root.append(createPropertyElement(name, value))
97
98 f.seek(0)
99- if not found:
100- root.append(createPropertyElement(name, value))
101- f.write((minidom.parseString(ET.tostring(root, encoding='UTF-8'))).toprettyxml(indent="\t"))
102- else:
103- f.write(ET.tostring(root, encoding='UTF-8'))
104+ f.write(ET.tostring(root, encoding='UTF-8'))
105 f.truncate()
106
107 def setDirPermission(path, owner, group, access):
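
The refactored setHadoopConfigXML above treats value=None as "remove this
property", updates a matching <property> in place, and otherwise appends a
new one via Python's for/else idiom. A self-contained sketch of the same
logic against a standard Hadoop *-site.xml (the property name and values
here are illustrative)::

    import xml.etree.ElementTree as ET

    SITE_XML = ('<configuration>'
                '<property><name>yarn.nodemanager.resource.memory-mb</name>'
                '<value>2048</value></property>'
                '</configuration>')

    def set_property(root, name, value):
        for p in root.findall('property'):
            if p.find('name').text == name:
                if value is None:
                    root.remove(p)      # None means "delete this property"
                else:
                    p.find('value').text = value
                break
        else:                           # no <property> matched: append one
            if value is not None:
                prop = ET.SubElement(root, 'property')
                ET.SubElement(prop, 'name').text = name
                ET.SubElement(prop, 'value').text = value

    root = ET.fromstring(SITE_XML)
    set_property(root, 'yarn.nodemanager.resource.memory-mb', '4096')
    print(ET.tostring(root).decode('utf-8'))
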
108@@ -67,15 +66,21 @@
109 for r,d,f in os.walk(path):
110 os.chmod( r , mode)
111
112-def wgetPkg(pkgName, crypType):
113+def wgetPkg(url, dest):
114 log("==> wgetPkg ")
115- crypFileName= pkgName+'.'+crypType
116- cmd = 'wget '+pkgName
117- subprocess.call(cmd.split())
118- if crypType:
119- cmd = ['wget', crypFileName]
120- subprocess.call(cmd)
121- #TODO -- cryption validation
122+ output = subprocess.check_output(['wget', '-S', url, '-O', dest],
123+ stderr=subprocess.STDOUT)
124+ etag = [l for l in output.split('\n') if l.startswith(' ETag')]
125+ if etag:
126+ etag = etag[0].split(':')[1].strip(' "')
127+ if '-' in etag:
128+ log.warn('Multi-part ETag not supported; cannot verify checksum')
129+ return # TODO: Support multi-part ETag algorithm
130+ with open(dest) as fp:
131+ md5 = hashlib.md5(fp.read()).hexdigest()
132+ if md5 != etag:
133+ log.error('Checksum mismatch: %s != %s' % (md5, etag))
134+ sys.exit(1)
135
136 def append_bashrc(line):
137 log("==> append_bashrc","INFO")
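
The rewritten wgetPkg verifies the download against the server's ETag when
the ETag looks like a plain MD5 digest; S3-style multi-part ETags contain a
'-' and are not a simple MD5 of the file, so verification is skipped for
them. A standalone sketch of that check (function name illustrative)::

    import hashlib

    def md5_matches_etag(path, etag):
        # Multi-part ETags ("<digest>-<parts>") are not a plain MD5 of the
        # whole file and cannot be verified this way.
        etag = etag.strip(' "')
        if '-' in etag:
            return None
        with open(path, 'rb') as fp:
            return hashlib.md5(fp.read()).hexdigest() == etag
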
138@@ -124,10 +129,21 @@
139 os.environ[ll[0]] = ll[1].strip().strip(';').strip("\"").strip()
140
141 def is_jvm_service_active(processname):
142- cmd=["jps"]
143- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
144- out, err = p.communicate()
145- if err == None and str(out).find(processname) != -1:
146+ return jps(processname)
147+
148+
149+def HDFS_command(command):
150+ cmd = shlex.split("su hdfs -c 'hdfs {c}'".format(c=command))
151+ return subprocess.check_output(cmd)
152+
153+def fconfigured(filename):
154+ fpath = os.path.join(os.path.sep, 'home', 'ubuntu', filename)
155+ if os.path.isfile(fpath):
156 return True
157 else:
158- return False
159\ No newline at end of file
160+ touch(fpath)
161+ return False
162+
163+def touch(fname, times=None):
164+ with open(fname, 'a'):
165+ os.utime(fname, times)
166
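The is_jvm_service_active change is presumably the "jps bug" of revision
41: the old code substring-matched the whole jps output, so checking for
NameNode would also match a line reporting SecondaryNameNode. The branch
delegates the check to charmhelpers.contrib.bigdata.utils.jps; an
equivalent exact-match check would look like this (a sketch, not the
charmhelpers implementation)::

    import subprocess

    def jvm_service_active(processname):
        # `jps` prints one "<pid> <ClassName>" line per JVM; compare the
        # class name exactly instead of searching the raw output.
        out = subprocess.check_output(['jps']).decode('utf-8')
        names = [line.split()[-1] for line in out.splitlines()
                 if line.strip()]
        return processname in names
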
167=== added file 'hooks/bootstrap.py'
168--- hooks/bootstrap.py 1970-01-01 00:00:00 +0000
169+++ hooks/bootstrap.py 2015-02-13 00:38:49 +0000
170@@ -0,0 +1,9 @@
171+import subprocess
172+
173+
174+def install_charmhelpers():
175+ subprocess.check_call(['apt-get', 'install', '-yq', 'python-pip'])
176+ subprocess.check_call(['pip', 'install', 'files/jujuresources-0.2.3.tar.gz'])
177+ import jujuresources
178+ jujuresources.fetch()
179+ jujuresources.install(['pathlib', 'pyaml', 'six', 'charmhelpers'])
180
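With charmhelpers un-vendored, it must be installed at hook time before the
first import: bootstrap.py pip-installs the bundled jujuresources tarball
and then fetches the Python dependencies declared in resources.yaml. A
hedged sketch of how a hook entry point might guard its imports (the guard
is illustrative, not taken from this diff)::

    import bootstrap

    try:
        import charmhelpers               # already installed on this unit?
    except ImportError:
        bootstrap.install_charmhelpers()  # pip-install jujuresources, then
                                          # fetch charmhelpers and friends
        import charmhelpers
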
181=== removed directory 'hooks/charmhelpers'
182=== removed file 'hooks/charmhelpers/__init__.py'
183=== removed directory 'hooks/charmhelpers/core'
184=== removed file 'hooks/charmhelpers/core/__init__.py'
185=== removed file 'hooks/charmhelpers/core/fstab.py'
186--- hooks/charmhelpers/core/fstab.py 2014-07-09 12:37:36 +0000
187+++ hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
188@@ -1,114 +0,0 @@
189-#!/usr/bin/env python
190-# -*- coding: utf-8 -*-
191-
192-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
193-
194-import os
195-
196-
197-class Fstab(file):
198- """This class extends file in order to implement a file reader/writer
199- for file `/etc/fstab`
200- """
201-
202- class Entry(object):
203- """Entry class represents a non-comment line on the `/etc/fstab` file
204- """
205- def __init__(self, device, mountpoint, filesystem,
206- options, d=0, p=0):
207- self.device = device
208- self.mountpoint = mountpoint
209- self.filesystem = filesystem
210-
211- if not options:
212- options = "defaults"
213-
214- self.options = options
215- self.d = d
216- self.p = p
217-
218- def __eq__(self, o):
219- return str(self) == str(o)
220-
221- def __str__(self):
222- return "{} {} {} {} {} {}".format(self.device,
223- self.mountpoint,
224- self.filesystem,
225- self.options,
226- self.d,
227- self.p)
228-
229- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
230-
231- def __init__(self, path=None):
232- if path:
233- self._path = path
234- else:
235- self._path = self.DEFAULT_PATH
236- file.__init__(self, self._path, 'r+')
237-
238- def _hydrate_entry(self, line):
239- return Fstab.Entry(*filter(
240- lambda x: x not in ('', None),
241- line.strip("\n").split(" ")))
242-
243- @property
244- def entries(self):
245- self.seek(0)
246- for line in self.readlines():
247- try:
248- if not line.startswith("#"):
249- yield self._hydrate_entry(line)
250- except ValueError:
251- pass
252-
253- def get_entry_by_attr(self, attr, value):
254- for entry in self.entries:
255- e_attr = getattr(entry, attr)
256- if e_attr == value:
257- return entry
258- return None
259-
260- def add_entry(self, entry):
261- if self.get_entry_by_attr('device', entry.device):
262- return False
263-
264- self.write(str(entry) + '\n')
265- self.truncate()
266- return entry
267-
268- def remove_entry(self, entry):
269- self.seek(0)
270-
271- lines = self.readlines()
272-
273- found = False
274- for index, line in enumerate(lines):
275- if not line.startswith("#"):
276- if self._hydrate_entry(line) == entry:
277- found = True
278- break
279-
280- if not found:
281- return False
282-
283- lines.remove(line)
284-
285- self.seek(0)
286- self.write(''.join(lines))
287- self.truncate()
288- return True
289-
290- @classmethod
291- def remove_by_mountpoint(cls, mountpoint, path=None):
292- fstab = cls(path=path)
293- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
294- if entry:
295- return fstab.remove_entry(entry)
296- return False
297-
298- @classmethod
299- def add(cls, device, mountpoint, filesystem, options=None, path=None):
300- return cls(path=path).add_entry(Fstab.Entry(device,
301- mountpoint, filesystem,
302- options=options))
303
304=== removed file 'hooks/charmhelpers/core/hookenv.py'
305--- hooks/charmhelpers/core/hookenv.py 2014-07-09 12:37:36 +0000
306+++ hooks/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000
307@@ -1,498 +0,0 @@
308-"Interactions with the Juju environment"
309-# Copyright 2013 Canonical Ltd.
310-#
311-# Authors:
312-# Charm Helpers Developers <juju@lists.ubuntu.com>
313-
314-import os
315-import json
316-import yaml
317-import subprocess
318-import sys
319-import UserDict
320-from subprocess import CalledProcessError
321-
322-CRITICAL = "CRITICAL"
323-ERROR = "ERROR"
324-WARNING = "WARNING"
325-INFO = "INFO"
326-DEBUG = "DEBUG"
327-MARKER = object()
328-
329-cache = {}
330-
331-
332-def cached(func):
333- """Cache return values for multiple executions of func + args
334-
335- For example:
336-
337- @cached
338- def unit_get(attribute):
339- pass
340-
341- unit_get('test')
342-
343- will cache the result of unit_get + 'test' for future calls.
344- """
345- def wrapper(*args, **kwargs):
346- global cache
347- key = str((func, args, kwargs))
348- try:
349- return cache[key]
350- except KeyError:
351- res = func(*args, **kwargs)
352- cache[key] = res
353- return res
354- return wrapper
355-
356-
357-def flush(key):
358- """Flushes any entries from function cache where the
359- key is found in the function+args """
360- flush_list = []
361- for item in cache:
362- if key in item:
363- flush_list.append(item)
364- for item in flush_list:
365- del cache[item]
366-
367-
368-def log(message, level=None):
369- """Write a message to the juju log"""
370- command = ['juju-log']
371- if level:
372- command += ['-l', level]
373- command += [message]
374- subprocess.call(command)
375-
376-
377-class Serializable(UserDict.IterableUserDict):
378- """Wrapper, an object that can be serialized to yaml or json"""
379-
380- def __init__(self, obj):
381- # wrap the object
382- UserDict.IterableUserDict.__init__(self)
383- self.data = obj
384-
385- def __getattr__(self, attr):
386- # See if this object has attribute.
387- if attr in ("json", "yaml", "data"):
388- return self.__dict__[attr]
389- # Check for attribute in wrapped object.
390- got = getattr(self.data, attr, MARKER)
391- if got is not MARKER:
392- return got
393- # Proxy to the wrapped object via dict interface.
394- try:
395- return self.data[attr]
396- except KeyError:
397- raise AttributeError(attr)
398-
399- def __getstate__(self):
400- # Pickle as a standard dictionary.
401- return self.data
402-
403- def __setstate__(self, state):
404- # Unpickle into our wrapper.
405- self.data = state
406-
407- def json(self):
408- """Serialize the object to json"""
409- return json.dumps(self.data)
410-
411- def yaml(self):
412- """Serialize the object to yaml"""
413- return yaml.dump(self.data)
414-
415-
416-def execution_environment():
417- """A convenient bundling of the current execution context"""
418- context = {}
419- context['conf'] = config()
420- if relation_id():
421- context['reltype'] = relation_type()
422- context['relid'] = relation_id()
423- context['rel'] = relation_get()
424- context['unit'] = local_unit()
425- context['rels'] = relations()
426- context['env'] = os.environ
427- return context
428-
429-
430-def in_relation_hook():
431- """Determine whether we're running in a relation hook"""
432- return 'JUJU_RELATION' in os.environ
433-
434-
435-def relation_type():
436- """The scope for the current relation hook"""
437- return os.environ.get('JUJU_RELATION', None)
438-
439-
440-def relation_id():
441- """The relation ID for the current relation hook"""
442- return os.environ.get('JUJU_RELATION_ID', None)
443-
444-
445-def local_unit():
446- """Local unit ID"""
447- return os.environ['JUJU_UNIT_NAME']
448-
449-
450-def remote_unit():
451- """The remote unit for the current relation hook"""
452- return os.environ['JUJU_REMOTE_UNIT']
453-
454-
455-def service_name():
456- """The name service group this unit belongs to"""
457- return local_unit().split('/')[0]
458-
459-
460-def hook_name():
461- """The name of the currently executing hook"""
462- return os.path.basename(sys.argv[0])
463-
464-
465-class Config(dict):
466- """A Juju charm config dictionary that can write itself to
467- disk (as json) and track which values have changed since
468- the previous hook invocation.
469-
470- Do not instantiate this object directly - instead call
471- ``hookenv.config()``
472-
473- Example usage::
474-
475- >>> # inside a hook
476- >>> from charmhelpers.core import hookenv
477- >>> config = hookenv.config()
478- >>> config['foo']
479- 'bar'
480- >>> config['mykey'] = 'myval'
481- >>> config.save()
482-
483-
484- >>> # user runs `juju set mycharm foo=baz`
485- >>> # now we're inside subsequent config-changed hook
486- >>> config = hookenv.config()
487- >>> config['foo']
488- 'baz'
489- >>> # test to see if this val has changed since last hook
490- >>> config.changed('foo')
491- True
492- >>> # what was the previous value?
493- >>> config.previous('foo')
494- 'bar'
495- >>> # keys/values that we add are preserved across hooks
496- >>> config['mykey']
497- 'myval'
498- >>> # don't forget to save at the end of hook!
499- >>> config.save()
500-
501- """
502- CONFIG_FILE_NAME = '.juju-persistent-config'
503-
504- def __init__(self, *args, **kw):
505- super(Config, self).__init__(*args, **kw)
506- self._prev_dict = None
507- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
508- if os.path.exists(self.path):
509- self.load_previous()
510-
511- def load_previous(self, path=None):
512- """Load previous copy of config from disk so that current values
513- can be compared to previous values.
514-
515- :param path:
516-
517- File path from which to load the previous config. If `None`,
518- config is loaded from the default location. If `path` is
519- specified, subsequent `save()` calls will write to the same
520- path.
521-
522- """
523- self.path = path or self.path
524- with open(self.path) as f:
525- self._prev_dict = json.load(f)
526-
527- def changed(self, key):
528- """Return true if the value for this key has changed since
529- the last save.
530-
531- """
532- if self._prev_dict is None:
533- return True
534- return self.previous(key) != self.get(key)
535-
536- def previous(self, key):
537- """Return previous value for this key, or None if there
538- is no "previous" value.
539-
540- """
541- if self._prev_dict:
542- return self._prev_dict.get(key)
543- return None
544-
545- def save(self):
546- """Save this config to disk.
547-
548- Preserves items in _prev_dict that do not exist in self.
549-
550- """
551- if self._prev_dict:
552- for k, v in self._prev_dict.iteritems():
553- if k not in self:
554- self[k] = v
555- with open(self.path, 'w') as f:
556- json.dump(self, f)
557-
558-
559-@cached
560-def config(scope=None):
561- """Juju charm configuration"""
562- config_cmd_line = ['config-get']
563- if scope is not None:
564- config_cmd_line.append(scope)
565- config_cmd_line.append('--format=json')
566- try:
567- config_data = json.loads(subprocess.check_output(config_cmd_line))
568- if scope is not None:
569- return config_data
570- return Config(config_data)
571- except ValueError:
572- return None
573-
574-
575-@cached
576-def relation_get(attribute=None, unit=None, rid=None):
577- """Get relation information"""
578- _args = ['relation-get', '--format=json']
579- if rid:
580- _args.append('-r')
581- _args.append(rid)
582- _args.append(attribute or '-')
583- if unit:
584- _args.append(unit)
585- try:
586- return json.loads(subprocess.check_output(_args))
587- except ValueError:
588- return None
589- except CalledProcessError, e:
590- if e.returncode == 2:
591- return None
592- raise
593-
594-
595-def relation_set(relation_id=None, relation_settings={}, **kwargs):
596- """Set relation information for the current unit"""
597- relation_cmd_line = ['relation-set']
598- if relation_id is not None:
599- relation_cmd_line.extend(('-r', relation_id))
600- for k, v in (relation_settings.items() + kwargs.items()):
601- if v is None:
602- relation_cmd_line.append('{}='.format(k))
603- else:
604- relation_cmd_line.append('{}={}'.format(k, v))
605- subprocess.check_call(relation_cmd_line)
606- # Flush cache of any relation-gets for local unit
607- flush(local_unit())
608-
609-
610-@cached
611-def relation_ids(reltype=None):
612- """A list of relation_ids"""
613- reltype = reltype or relation_type()
614- relid_cmd_line = ['relation-ids', '--format=json']
615- if reltype is not None:
616- relid_cmd_line.append(reltype)
617- return json.loads(subprocess.check_output(relid_cmd_line)) or []
618- return []
619-
620-
621-@cached
622-def related_units(relid=None):
623- """A list of related units"""
624- relid = relid or relation_id()
625- units_cmd_line = ['relation-list', '--format=json']
626- if relid is not None:
627- units_cmd_line.extend(('-r', relid))
628- return json.loads(subprocess.check_output(units_cmd_line)) or []
629-
630-
631-@cached
632-def relation_for_unit(unit=None, rid=None):
633- """Get the json represenation of a unit's relation"""
634- unit = unit or remote_unit()
635- relation = relation_get(unit=unit, rid=rid)
636- for key in relation:
637- if key.endswith('-list'):
638- relation[key] = relation[key].split()
639- relation['__unit__'] = unit
640- return relation
641-
642-
643-@cached
644-def relations_for_id(relid=None):
645- """Get relations of a specific relation ID"""
646- relation_data = []
647- relid = relid or relation_ids()
648- for unit in related_units(relid):
649- unit_data = relation_for_unit(unit, relid)
650- unit_data['__relid__'] = relid
651- relation_data.append(unit_data)
652- return relation_data
653-
654-
655-@cached
656-def relations_of_type(reltype=None):
657- """Get relations of a specific type"""
658- relation_data = []
659- reltype = reltype or relation_type()
660- for relid in relation_ids(reltype):
661- for relation in relations_for_id(relid):
662- relation['__relid__'] = relid
663- relation_data.append(relation)
664- return relation_data
665-
666-
667-@cached
668-def relation_types():
669- """Get a list of relation types supported by this charm"""
670- charmdir = os.environ.get('CHARM_DIR', '')
671- mdf = open(os.path.join(charmdir, 'metadata.yaml'))
672- md = yaml.safe_load(mdf)
673- rel_types = []
674- for key in ('provides', 'requires', 'peers'):
675- section = md.get(key)
676- if section:
677- rel_types.extend(section.keys())
678- mdf.close()
679- return rel_types
680-
681-
682-@cached
683-def relations():
684- """Get a nested dictionary of relation data for all related units"""
685- rels = {}
686- for reltype in relation_types():
687- relids = {}
688- for relid in relation_ids(reltype):
689- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
690- for unit in related_units(relid):
691- reldata = relation_get(unit=unit, rid=relid)
692- units[unit] = reldata
693- relids[relid] = units
694- rels[reltype] = relids
695- return rels
696-
697-
698-@cached
699-def is_relation_made(relation, keys='private-address'):
700- '''
701- Determine whether a relation is established by checking for
702- presence of key(s). If a list of keys is provided, they
703- must all be present for the relation to be identified as made
704- '''
705- if isinstance(keys, str):
706- keys = [keys]
707- for r_id in relation_ids(relation):
708- for unit in related_units(r_id):
709- context = {}
710- for k in keys:
711- context[k] = relation_get(k, rid=r_id,
712- unit=unit)
713- if None not in context.values():
714- return True
715- return False
716-
717-
718-def open_port(port, protocol="TCP"):
719- """Open a service network port"""
720- _args = ['open-port']
721- _args.append('{}/{}'.format(port, protocol))
722- subprocess.check_call(_args)
723-
724-
725-def close_port(port, protocol="TCP"):
726- """Close a service network port"""
727- _args = ['close-port']
728- _args.append('{}/{}'.format(port, protocol))
729- subprocess.check_call(_args)
730-
731-
732-@cached
733-def unit_get(attribute):
734- """Get the unit ID for the remote unit"""
735- _args = ['unit-get', '--format=json', attribute]
736- try:
737- return json.loads(subprocess.check_output(_args))
738- except ValueError:
739- return None
740-
741-
742-def unit_private_ip():
743- """Get this unit's private IP address"""
744- return unit_get('private-address')
745-
746-
747-class UnregisteredHookError(Exception):
748- """Raised when an undefined hook is called"""
749- pass
750-
751-
752-class Hooks(object):
753- """A convenient handler for hook functions.
754-
755- Example:
756- hooks = Hooks()
757-
758- # register a hook, taking its name from the function name
759- @hooks.hook()
760- def install():
761- ...
762-
763- # register a hook, providing a custom hook name
764- @hooks.hook("config-changed")
765- def config_changed():
766- ...
767-
768- if __name__ == "__main__":
769- # execute a hook based on the name the program is called by
770- hooks.execute(sys.argv)
771- """
772-
773- def __init__(self):
774- super(Hooks, self).__init__()
775- self._hooks = {}
776-
777- def register(self, name, function):
778- """Register a hook"""
779- self._hooks[name] = function
780-
781- def execute(self, args):
782- """Execute a registered hook based on args[0]"""
783- hook_name = os.path.basename(args[0])
784- if hook_name in self._hooks:
785- self._hooks[hook_name]()
786- else:
787- raise UnregisteredHookError(hook_name)
788-
789- def hook(self, *hook_names):
790- """Decorator, registering them as hooks"""
791- def wrapper(decorated):
792- for hook_name in hook_names:
793- self.register(hook_name, decorated)
794- else:
795- self.register(decorated.__name__, decorated)
796- if '_' in decorated.__name__:
797- self.register(
798- decorated.__name__.replace('_', '-'), decorated)
799- return decorated
800- return wrapper
801-
802-
803-def charm_dir():
804- """Return the root directory of the current charm"""
805- return os.environ.get('CHARM_DIR')
806
807=== removed file 'hooks/charmhelpers/core/host.py'
808--- hooks/charmhelpers/core/host.py 2014-07-09 12:37:36 +0000
809+++ hooks/charmhelpers/core/host.py 1970-01-01 00:00:00 +0000
810@@ -1,325 +0,0 @@
811-"""Tools for working with the host system"""
812-# Copyright 2012 Canonical Ltd.
813-#
814-# Authors:
815-# Nick Moffitt <nick.moffitt@canonical.com>
816-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
817-
818-import os
819-import pwd
820-import grp
821-import random
822-import string
823-import subprocess
824-import hashlib
825-import apt_pkg
826-
827-from collections import OrderedDict
828-
829-from hookenv import log
830-from fstab import Fstab
831-
832-
833-def service_start(service_name):
834- """Start a system service"""
835- return service('start', service_name)
836-
837-
838-def service_stop(service_name):
839- """Stop a system service"""
840- return service('stop', service_name)
841-
842-
843-def service_restart(service_name):
844- """Restart a system service"""
845- return service('restart', service_name)
846-
847-
848-def service_reload(service_name, restart_on_failure=False):
849- """Reload a system service, optionally falling back to restart if
850- reload fails"""
851- service_result = service('reload', service_name)
852- if not service_result and restart_on_failure:
853- service_result = service('restart', service_name)
854- return service_result
855-
856-
857-def service(action, service_name):
858- """Control a system service"""
859- cmd = ['service', service_name, action]
860- return subprocess.call(cmd) == 0
861-
862-
863-def service_running(service):
864- """Determine whether a system service is running"""
865- try:
866- output = subprocess.check_output(['service', service, 'status'])
867- except subprocess.CalledProcessError:
868- return False
869- else:
870- if ("start/running" in output or "is running" in output):
871- return True
872- else:
873- return False
874-
875-
876-def adduser(username, password=None, shell='/bin/bash', system_user=False):
877- """Add a user to the system"""
878- try:
879- user_info = pwd.getpwnam(username)
880- log('user {0} already exists!'.format(username))
881- except KeyError:
882- log('creating user {0}'.format(username))
883- cmd = ['useradd']
884- if system_user or password is None:
885- cmd.append('--system')
886- else:
887- cmd.extend([
888- '--create-home',
889- '--shell', shell,
890- '--password', password,
891- ])
892- cmd.append(username)
893- subprocess.check_call(cmd)
894- user_info = pwd.getpwnam(username)
895- return user_info
896-
897-
898-def add_user_to_group(username, group):
899- """Add a user to a group"""
900- cmd = [
901- 'gpasswd', '-a',
902- username,
903- group
904- ]
905- log("Adding user {} to group {}".format(username, group))
906- subprocess.check_call(cmd)
907-
908-
909-def rsync(from_path, to_path, flags='-r', options=None):
910- """Replicate the contents of a path"""
911- options = options or ['--delete', '--executability']
912- cmd = ['/usr/bin/rsync', flags]
913- cmd.extend(options)
914- cmd.append(from_path)
915- cmd.append(to_path)
916- log(" ".join(cmd))
917- return subprocess.check_output(cmd).strip()
918-
919-
920-def symlink(source, destination):
921- """Create a symbolic link"""
922- log("Symlinking {} as {}".format(source, destination))
923- cmd = [
924- 'ln',
925- '-sf',
926- source,
927- destination,
928- ]
929- subprocess.check_call(cmd)
930-
931-
932-def mkdir(path, owner='root', group='root', perms=0555, force=False):
933- """Create a directory"""
934- log("Making dir {} {}:{} {:o}".format(path, owner, group,
935- perms))
936- uid = pwd.getpwnam(owner).pw_uid
937- gid = grp.getgrnam(group).gr_gid
938- realpath = os.path.abspath(path)
939- if os.path.exists(realpath):
940- if force and not os.path.isdir(realpath):
941- log("Removing non-directory file {} prior to mkdir()".format(path))
942- os.unlink(realpath)
943- else:
944- os.makedirs(realpath, perms)
945- os.chown(realpath, uid, gid)
946-
947-
948-def write_file(path, content, owner='root', group='root', perms=0444):
949- """Create or overwrite a file with the contents of a string"""
950- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
951- uid = pwd.getpwnam(owner).pw_uid
952- gid = grp.getgrnam(group).gr_gid
953- with open(path, 'w') as target:
954- os.fchown(target.fileno(), uid, gid)
955- os.fchmod(target.fileno(), perms)
956- target.write(content)
957-
958-
959-def fstab_remove(mp):
960- """Remove the given mountpoint entry from /etc/fstab
961- """
962- return Fstab.remove_by_mountpoint(mp)
963-
964-
965-def fstab_add(dev, mp, fs, options=None):
966- """Adds the given device entry to the /etc/fstab file
967- """
968- return Fstab.add(dev, mp, fs, options=options)
969-
970-
971-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
972- """Mount a filesystem at a particular mountpoint"""
973- cmd_args = ['mount']
974- if options is not None:
975- cmd_args.extend(['-o', options])
976- cmd_args.extend([device, mountpoint])
977- try:
978- subprocess.check_output(cmd_args)
979- except subprocess.CalledProcessError, e:
980- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
981- return False
982-
983- if persist:
984- return fstab_add(device, mountpoint, filesystem, options=options)
985- return True
986-
987-
988-def umount(mountpoint, persist=False):
989- """Unmount a filesystem"""
990- cmd_args = ['umount', mountpoint]
991- try:
992- subprocess.check_output(cmd_args)
993- except subprocess.CalledProcessError, e:
994- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
995- return False
996-
997- if persist:
998- return fstab_remove(mountpoint)
999- return True
1000-
1001-
1002-def mounts():
1003- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
1004- with open('/proc/mounts') as f:
1005- # [['/mount/point','/dev/path'],[...]]
1006- system_mounts = [m[1::-1] for m in [l.strip().split()
1007- for l in f.readlines()]]
1008- return system_mounts
1009-
1010-
1011-def file_hash(path):
1012- """Generate a md5 hash of the contents of 'path' or None if not found """
1013- if os.path.exists(path):
1014- h = hashlib.md5()
1015- with open(path, 'r') as source:
1016- h.update(source.read()) # IGNORE:E1101 - it does have update
1017- return h.hexdigest()
1018- else:
1019- return None
1020-
1021-
1022-def restart_on_change(restart_map, stopstart=False):
1023- """Restart services based on configuration files changing
1024-
1025- This function is used a decorator, for example
1026-
1027- @restart_on_change({
1028- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
1029- })
1030- def ceph_client_changed():
1031- ...
1032-
1033- In this example, the cinder-api and cinder-volume services
1034- would be restarted if /etc/ceph/ceph.conf is changed by the
1035- ceph_client_changed function.
1036- """
1037- def wrap(f):
1038- def wrapped_f(*args):
1039- checksums = {}
1040- for path in restart_map:
1041- checksums[path] = file_hash(path)
1042- f(*args)
1043- restarts = []
1044- for path in restart_map:
1045- if checksums[path] != file_hash(path):
1046- restarts += restart_map[path]
1047- services_list = list(OrderedDict.fromkeys(restarts))
1048- if not stopstart:
1049- for service_name in services_list:
1050- service('restart', service_name)
1051- else:
1052- for action in ['stop', 'start']:
1053- for service_name in services_list:
1054- service(action, service_name)
1055- return wrapped_f
1056- return wrap
1057-
1058-
1059-def lsb_release():
1060- """Return /etc/lsb-release in a dict"""
1061- d = {}
1062- with open('/etc/lsb-release', 'r') as lsb:
1063- for l in lsb:
1064- k, v = l.split('=')
1065- d[k.strip()] = v.strip()
1066- return d
1067-
1068-
1069-def pwgen(length=None):
1070- """Generate a random pasword."""
1071- if length is None:
1072- length = random.choice(range(35, 45))
1073- alphanumeric_chars = [
1074- l for l in (string.letters + string.digits)
1075- if l not in 'l0QD1vAEIOUaeiou']
1076- random_chars = [
1077- random.choice(alphanumeric_chars) for _ in range(length)]
1078- return(''.join(random_chars))
1079-
1080-
1081-def list_nics(nic_type):
1082- '''Return a list of nics of given type(s)'''
1083- if isinstance(nic_type, basestring):
1084- int_types = [nic_type]
1085- else:
1086- int_types = nic_type
1087- interfaces = []
1088- for int_type in int_types:
1089- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
1090- ip_output = subprocess.check_output(cmd).split('\n')
1091- ip_output = (line for line in ip_output if line)
1092- for line in ip_output:
1093- if line.split()[1].startswith(int_type):
1094- interfaces.append(line.split()[1].replace(":", ""))
1095- return interfaces
1096-
1097-
1098-def set_nic_mtu(nic, mtu):
1099- '''Set MTU on a network interface'''
1100- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
1101- subprocess.check_call(cmd)
1102-
1103-
1104-def get_nic_mtu(nic):
1105- cmd = ['ip', 'addr', 'show', nic]
1106- ip_output = subprocess.check_output(cmd).split('\n')
1107- mtu = ""
1108- for line in ip_output:
1109- words = line.split()
1110- if 'mtu' in words:
1111- mtu = words[words.index("mtu") + 1]
1112- return mtu
1113-
1114-
1115-def get_nic_hwaddr(nic):
1116- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
1117- ip_output = subprocess.check_output(cmd)
1118- hwaddr = ""
1119- words = ip_output.split()
1120- if 'link/ether' in words:
1121- hwaddr = words[words.index('link/ether') + 1]
1122- return hwaddr
1123-
1124-
1125-def cmp_pkgrevno(package, revno, pkgcache=None):
1126- '''Compare supplied revno with the revno of the installed package
1127- 1 => Installed revno is greater than supplied arg
1128- 0 => Installed revno is the same as supplied arg
1129- -1 => Installed revno is less than supplied arg
1130- '''
1131- if not pkgcache:
1132- apt_pkg.init()
1133- pkgcache = apt_pkg.Cache()
1134- pkg = pkgcache[package]
1135- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
1136
1137=== removed directory 'hooks/charmhelpers/fetch'
1138=== removed file 'hooks/charmhelpers/fetch/__init__.py'
1139--- hooks/charmhelpers/fetch/__init__.py 2014-07-09 12:37:36 +0000
1140+++ hooks/charmhelpers/fetch/__init__.py 1970-01-01 00:00:00 +0000
1141@@ -1,349 +0,0 @@
1142-import importlib
1143-import time
1144-from yaml import safe_load
1145-from charmhelpers.core.host import (
1146- lsb_release
1147-)
1148-from urlparse import (
1149- urlparse,
1150- urlunparse,
1151-)
1152-import subprocess
1153-from charmhelpers.core.hookenv import (
1154- config,
1155- log,
1156-)
1157-import apt_pkg
1158-import os
1159-
1160-
1161-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
1162-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
1163-"""
1164-PROPOSED_POCKET = """# Proposed
1165-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
1166-"""
1167-CLOUD_ARCHIVE_POCKETS = {
1168- # Folsom
1169- 'folsom': 'precise-updates/folsom',
1170- 'precise-folsom': 'precise-updates/folsom',
1171- 'precise-folsom/updates': 'precise-updates/folsom',
1172- 'precise-updates/folsom': 'precise-updates/folsom',
1173- 'folsom/proposed': 'precise-proposed/folsom',
1174- 'precise-folsom/proposed': 'precise-proposed/folsom',
1175- 'precise-proposed/folsom': 'precise-proposed/folsom',
1176- # Grizzly
1177- 'grizzly': 'precise-updates/grizzly',
1178- 'precise-grizzly': 'precise-updates/grizzly',
1179- 'precise-grizzly/updates': 'precise-updates/grizzly',
1180- 'precise-updates/grizzly': 'precise-updates/grizzly',
1181- 'grizzly/proposed': 'precise-proposed/grizzly',
1182- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
1183- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
1184- # Havana
1185- 'havana': 'precise-updates/havana',
1186- 'precise-havana': 'precise-updates/havana',
1187- 'precise-havana/updates': 'precise-updates/havana',
1188- 'precise-updates/havana': 'precise-updates/havana',
1189- 'havana/proposed': 'precise-proposed/havana',
1190- 'precise-havana/proposed': 'precise-proposed/havana',
1191- 'precise-proposed/havana': 'precise-proposed/havana',
1192- # Icehouse
1193- 'icehouse': 'precise-updates/icehouse',
1194- 'precise-icehouse': 'precise-updates/icehouse',
1195- 'precise-icehouse/updates': 'precise-updates/icehouse',
1196- 'precise-updates/icehouse': 'precise-updates/icehouse',
1197- 'icehouse/proposed': 'precise-proposed/icehouse',
1198- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
1199- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
1200- # Juno
1201- 'juno': 'trusty-updates/juno',
1202- 'trusty-juno': 'trusty-updates/juno',
1203- 'trusty-juno/updates': 'trusty-updates/juno',
1204- 'trusty-updates/juno': 'trusty-updates/juno',
1205- 'juno/proposed': 'trusty-proposed/juno',
1206- 'juno/proposed': 'trusty-proposed/juno',
1207- 'trusty-juno/proposed': 'trusty-proposed/juno',
1208- 'trusty-proposed/juno': 'trusty-proposed/juno',
1209-}
1210-
1211-# The order of this list is very important. Handlers should be listed in from
1212-# least- to most-specific URL matching.
1213-FETCH_HANDLERS = (
1214- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
1215- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
1216-)
1217-
1218-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
1219-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
1220-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
1221-
1222-
1223-class SourceConfigError(Exception):
1224- pass
1225-
1226-
1227-class UnhandledSource(Exception):
1228- pass
1229-
1230-
1231-class AptLockError(Exception):
1232- pass
1233-
1234-
1235-class BaseFetchHandler(object):
1236-
1237- """Base class for FetchHandler implementations in fetch plugins"""
1238-
1239- def can_handle(self, source):
1240- """Returns True if the source can be handled. Otherwise returns
1241- a string explaining why it cannot"""
1242- return "Wrong source type"
1243-
1244- def install(self, source):
1245- """Try to download and unpack the source. Return the path to the
1246- unpacked files or raise UnhandledSource."""
1247- raise UnhandledSource("Wrong source type {}".format(source))
1248-
1249- def parse_url(self, url):
1250- return urlparse(url)
1251-
1252- def base_url(self, url):
1253- """Return url without querystring or fragment"""
1254- parts = list(self.parse_url(url))
1255- parts[4:] = ['' for i in parts[4:]]
1256- return urlunparse(parts)
1257-
1258-
1259-def filter_installed_packages(packages):
1260- """Returns a list of packages that require installation"""
1261- apt_pkg.init()
1262-
1263- # Tell apt to build an in-memory cache to prevent race conditions (if
1264- # another process is already building the cache).
1265- apt_pkg.config.set("Dir::Cache::pkgcache", "")
1266-
1267- cache = apt_pkg.Cache()
1268- _pkgs = []
1269- for package in packages:
1270- try:
1271- p = cache[package]
1272- p.current_ver or _pkgs.append(package)
1273- except KeyError:
1274- log('Package {} has no installation candidate.'.format(package),
1275- level='WARNING')
1276- _pkgs.append(package)
1277- return _pkgs
1278-
1279-
1280-def apt_install(packages, options=None, fatal=False):
1281- """Install one or more packages"""
1282- if options is None:
1283- options = ['--option=Dpkg::Options::=--force-confold']
1284-
1285- cmd = ['apt-get', '--assume-yes']
1286- cmd.extend(options)
1287- cmd.append('install')
1288- if isinstance(packages, basestring):
1289- cmd.append(packages)
1290- else:
1291- cmd.extend(packages)
1292- log("Installing {} with options: {}".format(packages,
1293- options))
1294- _run_apt_command(cmd, fatal)
1295-
1296-
1297-def apt_upgrade(options=None, fatal=False, dist=False):
1298- """Upgrade all packages"""
1299- if options is None:
1300- options = ['--option=Dpkg::Options::=--force-confold']
1301-
1302- cmd = ['apt-get', '--assume-yes']
1303- cmd.extend(options)
1304- if dist:
1305- cmd.append('dist-upgrade')
1306- else:
1307- cmd.append('upgrade')
1308- log("Upgrading with options: {}".format(options))
1309- _run_apt_command(cmd, fatal)
1310-
1311-
1312-def apt_update(fatal=False):
1313- """Update local apt cache"""
1314- cmd = ['apt-get', 'update']
1315- _run_apt_command(cmd, fatal)
1316-
1317-
1318-def apt_purge(packages, fatal=False):
1319- """Purge one or more packages"""
1320- cmd = ['apt-get', '--assume-yes', 'purge']
1321- if isinstance(packages, basestring):
1322- cmd.append(packages)
1323- else:
1324- cmd.extend(packages)
1325- log("Purging {}".format(packages))
1326- _run_apt_command(cmd, fatal)
1327-
1328-
1329-def apt_hold(packages, fatal=False):
1330- """Hold one or more packages"""
1331- cmd = ['apt-mark', 'hold']
1332- if isinstance(packages, basestring):
1333- cmd.append(packages)
1334- else:
1335- cmd.extend(packages)
1336- log("Holding {}".format(packages))
1337-
1338- if fatal:
1339- subprocess.check_call(cmd)
1340- else:
1341- subprocess.call(cmd)
1342-
1343-
1344-def add_source(source, key=None):
1345- if source is None:
1346- log('Source is not present. Skipping')
1347- return
1348-
1349- if (source.startswith('ppa:') or
1350- source.startswith('http') or
1351- source.startswith('deb ') or
1352- source.startswith('cloud-archive:')):
1353- subprocess.check_call(['add-apt-repository', '--yes', source])
1354- elif source.startswith('cloud:'):
1355- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
1356- fatal=True)
1357- pocket = source.split(':')[-1]
1358- if pocket not in CLOUD_ARCHIVE_POCKETS:
1359- raise SourceConfigError(
1360- 'Unsupported cloud: source option %s' %
1361- pocket)
1362- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
1363- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
1364- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
1365- elif source == 'proposed':
1366- release = lsb_release()['DISTRIB_CODENAME']
1367- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
1368- apt.write(PROPOSED_POCKET.format(release))
1369- if key:
1370- subprocess.check_call(['apt-key', 'adv', '--keyserver',
1371- 'hkp://keyserver.ubuntu.com:80', '--recv',
1372- key])
1373-
1374-
1375-def configure_sources(update=False,
1376- sources_var='install_sources',
1377- keys_var='install_keys'):
1378- """
1379- Configure multiple sources from charm configuration
1380-
1381- Example config:
1382- install_sources:
1383- - "ppa:foo"
1384- - "http://example.com/repo precise main"
1385- install_keys:
1386- - null
1387- - "a1b2c3d4"
1388-
1389- Note that 'null' (a.k.a. None) should not be quoted.
1390- """
1391- sources = safe_load(config(sources_var))
1392- keys = config(keys_var)
1393- if keys is not None:
1394- keys = safe_load(keys)
1395- if isinstance(sources, basestring) and (
1396- keys is None or isinstance(keys, basestring)):
1397- add_source(sources, keys)
1398- else:
1399- if not len(sources) == len(keys):
1400- msg = 'Install sources and keys lists are different lengths'
1401- raise SourceConfigError(msg)
1402- for src_num in range(len(sources)):
1403- add_source(sources[src_num], keys[src_num])
1404- if update:
1405- apt_update(fatal=True)
1406-
1407-
1408-def install_remote(source):
1409- """
1410- Install a file tree from a remote source
1411-
1412- The specified source should be a url of the form:
1413- scheme://[host]/path[#[option=value][&...]]
1414-
1415- Schemes supported are based on this modules submodules
1416- Options supported are submodule-specific"""
1417- # We ONLY check for True here because can_handle may return a string
1418- # explaining why it can't handle a given source.
1419- handlers = [h for h in plugins() if h.can_handle(source) is True]
1420- installed_to = None
1421- for handler in handlers:
1422- try:
1423- installed_to = handler.install(source)
1424- except UnhandledSource:
1425- pass
1426- if not installed_to:
1427- raise UnhandledSource("No handler found for source {}".format(source))
1428- return installed_to
1429-
1430-
1431-def install_from_config(config_var_name):
1432- charm_config = config()
1433- source = charm_config[config_var_name]
1434- return install_remote(source)
1435-
1436-
1437-def plugins(fetch_handlers=None):
1438- if not fetch_handlers:
1439- fetch_handlers = FETCH_HANDLERS
1440- plugin_list = []
1441- for handler_name in fetch_handlers:
1442- package, classname = handler_name.rsplit('.', 1)
1443- try:
1444- handler_class = getattr(
1445- importlib.import_module(package),
1446- classname)
1447- plugin_list.append(handler_class())
1448- except (ImportError, AttributeError):
1449- # Skip missing plugins so that they can be ommitted from
1450- # installation if desired
1451- log("FetchHandler {} not found, skipping plugin".format(
1452- handler_name))
1453- return plugin_list
1454-
1455-
1456-def _run_apt_command(cmd, fatal=False):
1457- """
1458- Run an APT command, checking output and retrying if the fatal flag is set
1459- to True.
1460-
1461- :param: cmd: str: The apt command to run.
1462- :param: fatal: bool: Whether the command's output should be checked and
1463- retried.
1464- """
1465- env = os.environ.copy()
1466-
1467- if 'DEBIAN_FRONTEND' not in env:
1468- env['DEBIAN_FRONTEND'] = 'noninteractive'
1469-
1470- if fatal:
1471- retry_count = 0
1472- result = None
1473-
1474- # If the command is considered "fatal", we need to retry if the apt
1475- # lock was not acquired.
1476-
1477- while result is None or result == APT_NO_LOCK:
1478- try:
1479- result = subprocess.check_call(cmd, env=env)
1480- except subprocess.CalledProcessError, e:
1481- retry_count = retry_count + 1
1482- if retry_count > APT_NO_LOCK_RETRY_COUNT:
1483- raise
1484- result = e.returncode
1485- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
1486- "".format(APT_NO_LOCK_RETRY_DELAY))
1487- time.sleep(APT_NO_LOCK_RETRY_DELAY)
1488-
1489- else:
1490- subprocess.call(cmd, env=env)
1491
1492=== removed file 'hooks/charmhelpers/fetch/archiveurl.py'
1493--- hooks/charmhelpers/fetch/archiveurl.py 2014-07-09 12:37:36 +0000
1494+++ hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000
1495@@ -1,63 +0,0 @@
1496-import os
1497-import urllib2
1498-import urlparse
1499-
1500-from charmhelpers.fetch import (
1501- BaseFetchHandler,
1502- UnhandledSource
1503-)
1504-from charmhelpers.payload.archive import (
1505- get_archive_handler,
1506- extract,
1507-)
1508-from charmhelpers.core.host import mkdir
1509-
1510-
1511-class ArchiveUrlFetchHandler(BaseFetchHandler):
1512- """Handler for archives via generic URLs"""
1513- def can_handle(self, source):
1514- url_parts = self.parse_url(source)
1515- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
1516- return "Wrong source type"
1517- if get_archive_handler(self.base_url(source)):
1518- return True
1519- return False
1520-
1521- def download(self, source, dest):
1522- # propogate all exceptions
1523- # URLError, OSError, etc
1524- proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
1525- if proto in ('http', 'https'):
1526- auth, barehost = urllib2.splituser(netloc)
1527- if auth is not None:
1528- source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
1529- username, password = urllib2.splitpasswd(auth)
1530- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
1531- # Realm is set to None in add_password to force the username and password
1532- # to be used whatever the realm
1533- passman.add_password(None, source, username, password)
1534- authhandler = urllib2.HTTPBasicAuthHandler(passman)
1535- opener = urllib2.build_opener(authhandler)
1536- urllib2.install_opener(opener)
1537- response = urllib2.urlopen(source)
1538- try:
1539- with open(dest, 'w') as dest_file:
1540- dest_file.write(response.read())
1541- except Exception as e:
1542- if os.path.isfile(dest):
1543- os.unlink(dest)
1544- raise e
1545-
1546- def install(self, source):
1547- url_parts = self.parse_url(source)
1548- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
1549- if not os.path.exists(dest_dir):
1550- mkdir(dest_dir, perms=0755)
1551- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
1552- try:
1553- self.download(source, dld_file)
1554- except urllib2.URLError as e:
1555- raise UnhandledSource(e.reason)
1556- except OSError as e:
1557- raise UnhandledSource(e.strerror)
1558- return extract(dld_file)
1559
1560=== removed file 'hooks/charmhelpers/fetch/bzrurl.py'
1561--- hooks/charmhelpers/fetch/bzrurl.py 2014-07-09 12:37:36 +0000
1562+++ hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000
1563@@ -1,50 +0,0 @@
1564-import os
1565-from charmhelpers.fetch import (
1566- BaseFetchHandler,
1567- UnhandledSource
1568-)
1569-from charmhelpers.core.host import mkdir
1570-
1571-try:
1572- from bzrlib.branch import Branch
1573-except ImportError:
1574- from charmhelpers.fetch import apt_install
1575- apt_install("python-bzrlib")
1576- from bzrlib.branch import Branch
1577-
1578-
1579-class BzrUrlFetchHandler(BaseFetchHandler):
1580- """Handler for bazaar branches via generic and lp URLs"""
1581- def can_handle(self, source):
1582- url_parts = self.parse_url(source)
1583- if url_parts.scheme not in ('bzr+ssh', 'lp'):
1584- return False
1585- else:
1586- return True
1587-
1588- def branch(self, source, dest):
1589- url_parts = self.parse_url(source)
1590- # If we use lp:branchname scheme we need to load plugins
1591- if not self.can_handle(source):
1592- raise UnhandledSource("Cannot handle {}".format(source))
1593- if url_parts.scheme == "lp":
1594- from bzrlib.plugin import load_plugins
1595- load_plugins()
1596- try:
1597- remote_branch = Branch.open(source)
1598- remote_branch.bzrdir.sprout(dest).open_branch()
1599- except Exception as e:
1600- raise e
1601-
1602- def install(self, source):
1603- url_parts = self.parse_url(source)
1604- branch_name = url_parts.path.strip("/").split("/")[-1]
1605- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1606- branch_name)
1607- if not os.path.exists(dest_dir):
1608- mkdir(dest_dir, perms=0755)
1609- try:
1610- self.branch(source, dest_dir)
1611- except OSError as e:
1612- raise UnhandledSource(e.strerror)
1613- return dest_dir
1614
1615=== removed directory 'hooks/charmhelpers/lib'
1616=== removed file 'hooks/charmhelpers/lib/__init__.py'
1617=== removed file 'hooks/charmhelpers/lib/ceph_utils.py'
1618--- hooks/charmhelpers/lib/ceph_utils.py 2014-07-09 12:37:36 +0000
1619+++ hooks/charmhelpers/lib/ceph_utils.py 1970-01-01 00:00:00 +0000
1620@@ -1,315 +0,0 @@
1621-#
1622-# Copyright 2012 Canonical Ltd.
1623-#
1624-# This file is sourced from lp:openstack-charm-helpers
1625-#
1626-# Authors:
1627-# James Page <james.page@ubuntu.com>
1628-# Adam Gandelman <adamg@ubuntu.com>
1629-#
1630-
1631-import commands
1632-import json
1633-import subprocess
1634-import os
1635-import shutil
1636-import time
1637-import lib.utils as utils
1638-
1639-KEYRING = '/etc/ceph/ceph.client.%s.keyring'
1640-KEYFILE = '/etc/ceph/ceph.client.%s.key'
1641-
1642-CEPH_CONF = """[global]
1643- auth supported = %(auth)s
1644- keyring = %(keyring)s
1645- mon host = %(mon_hosts)s
1646- log to syslog = %(use_syslog)s
1647- err to syslog = %(use_syslog)s
1648- clog to syslog = %(use_syslog)s
1649-"""
1650-
1651-
1652-def execute(cmd):
1653- subprocess.check_call(cmd)
1654-
1655-
1656-def execute_shell(cmd):
1657- subprocess.check_call(cmd, shell=True)
1658-
1659-
1660-def install():
1661- ceph_dir = "/etc/ceph"
1662- if not os.path.isdir(ceph_dir):
1663- os.mkdir(ceph_dir)
1664- utils.install('ceph-common')
1665-
1666-
1667-def rbd_exists(service, pool, rbd_img):
1668- (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
1669- (service, pool))
1670- return rbd_img in out
1671-
1672-
1673-def create_rbd_image(service, pool, image, sizemb):
1674- cmd = [
1675- 'rbd',
1676- 'create',
1677- image,
1678- '--size',
1679- str(sizemb),
1680- '--id',
1681- service,
1682- '--pool',
1683- pool]
1684- execute(cmd)
1685-
1686-
1687-def pool_exists(service, name):
1688- (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
1689- return name in out
1690-
1691-
1692-def ceph_version():
1693- ''' Retrieve the local version of ceph '''
1694- if os.path.exists('/usr/bin/ceph'):
1695- cmd = ['ceph', '-v']
1696- output = subprocess.check_output(cmd)
1697- output = output.split()
1698- if len(output) > 3:
1699- return output[2]
1700- else:
1701- return None
1702- else:
1703- return None
1704-
1705-
1706-def get_osds(service):
1707- '''
1708- Return a list of all Ceph Object Storage Daemons
1709- currently in the cluster
1710- '''
1711- version = ceph_version()
1712- if version and version >= '0.56':
1713- cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
1714- return json.loads(subprocess.check_output(cmd))
1715- else:
1716- return None
1717-
1718-
1719-def create_pool(service, name, replicas=2):
1720- ''' Create a new RADOS pool '''
1721- if pool_exists(service, name):
1722- utils.juju_log('WARNING',
1723- "Ceph pool {} already exists, "
1724- "skipping creation".format(name))
1725- return
1726-
1727- osds = get_osds(service)
1728- if osds:
1729- pgnum = (len(osds) * 100 / replicas)
1730- else:
1731- # NOTE(james-page): Default to 200 for older ceph versions
1732- # which don't support OSD query from cli
1733- pgnum = 200
1734-
1735- cmd = [
1736- 'ceph', '--id', service,
1737- 'osd', 'pool', 'create',
1738- name, str(pgnum)
1739- ]
1740- subprocess.check_call(cmd)
1741- cmd = [
1742- 'ceph', '--id', service,
1743- 'osd', 'pool', 'set', name,
1744- 'size', str(replicas)
1745- ]
1746- subprocess.check_call(cmd)
1747-
1748-
1749-def keyfile_path(service):
1750- return KEYFILE % service
1751-
1752-
1753-def keyring_path(service):
1754- return KEYRING % service
1755-
1756-
1757-def create_keyring(service, key):
1758- keyring = keyring_path(service)
1759- if os.path.exists(keyring):
1760- utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
1761- cmd = [
1762- 'ceph-authtool',
1763- keyring,
1764- '--create-keyring',
1765- '--name=client.%s' % service,
1766- '--add-key=%s' % key]
1767- execute(cmd)
1768- utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
1769-
1770-
1771-def create_key_file(service, key):
1772- # create a file containing the key
1773- keyfile = keyfile_path(service)
1774- if os.path.exists(keyfile):
1775- utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
1776- fd = open(keyfile, 'w')
1777- fd.write(key)
1778- fd.close()
1779- utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
1780-
1781-
1782-def get_ceph_nodes():
1783- hosts = []
1784- for r_id in utils.relation_ids('ceph'):
1785- for unit in utils.relation_list(r_id):
1786- hosts.append(utils.relation_get('private-address',
1787- unit=unit, rid=r_id))
1788- return hosts
1789-
1790-
1791-def configure(service, key, auth, use_syslog):
1792- create_keyring(service, key)
1793- create_key_file(service, key)
1794- hosts = get_ceph_nodes()
1795- mon_hosts = ",".join(map(str, hosts))
1796- keyring = keyring_path(service)
1797- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
1798- ceph_conf.write(CEPH_CONF % locals())
1799- modprobe_kernel_module('rbd')
1800-
1801-
1802-def image_mapped(image_name):
1803- (rc, out) = commands.getstatusoutput('rbd showmapped')
1804- return image_name in out
1805-
1806-
1807-def map_block_storage(service, pool, image):
1808- cmd = [
1809- 'rbd',
1810- 'map',
1811- '%s/%s' % (pool, image),
1812- '--user',
1813- service,
1814- '--secret',
1815- keyfile_path(service)]
1816- execute(cmd)
1817-
1818-
1819-def filesystem_mounted(fs):
1820- return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
1821-
1822-
1823-def make_filesystem(blk_device, fstype='ext4'):
1824- count = 0
1825- e_noent = os.errno.ENOENT
1826- while not os.path.exists(blk_device):
1827- if count >= 10:
1828- utils.juju_log('ERROR', 'ceph: gave up waiting on block '
1829- 'device %s' % blk_device)
1830- raise IOError(e_noent, os.strerror(e_noent), blk_device)
1831- utils.juju_log('INFO', 'ceph: waiting for block device %s '
1832- 'to appear' % blk_device)
1833- count += 1
1834- time.sleep(1)
1835- else:
1836- utils.juju_log('INFO', 'ceph: Formatting block device %s '
1837- 'as filesystem %s.' % (blk_device, fstype))
1838- execute(['mkfs', '-t', fstype, blk_device])
1839-
1840-
1841-def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
1842- # mount block device into /mnt
1843- cmd = ['mount', '-t', fstype, blk_device, '/mnt']
1844- execute(cmd)
1845-
1846- # copy data to /mnt
1847- try:
1848- copy_files(data_src_dst, '/mnt')
1849- except:
1850- pass
1851-
1852- # umount block device
1853- cmd = ['umount', '/mnt']
1854- execute(cmd)
1855-
1856- _dir = os.stat(data_src_dst)
1857- uid = _dir.st_uid
1858- gid = _dir.st_gid
1859-
1860- # re-mount where the data should originally be
1861- cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
1862- execute(cmd)
1863-
1864- # ensure original ownership of new mount.
1865- cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
1866- execute(cmd)
1867-
1868-
1869-# TODO: re-use
1870-def modprobe_kernel_module(module):
1871- utils.juju_log('INFO', 'Loading kernel module')
1872- cmd = ['modprobe', module]
1873- execute(cmd)
1874- cmd = 'echo %s >> /etc/modules' % module
1875- execute_shell(cmd)
1876-
1877-
1878-def copy_files(src, dst, symlinks=False, ignore=None):
1879- for item in os.listdir(src):
1880- s = os.path.join(src, item)
1881- d = os.path.join(dst, item)
1882- if os.path.isdir(s):
1883- shutil.copytree(s, d, symlinks, ignore)
1884- else:
1885- shutil.copy2(s, d)
1886-
1887-
1888-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
1889- blk_device, fstype, system_services=[],
1890- rbd_pool_replicas=2):
1891- """
1892- To be called from the current cluster leader.
1893- Ensures given pool and RBD image exists, is mapped to a block device,
1894- and the device is formatted and mounted at the given mount_point.
1895-
1896- If formatting a device for the first time, data existing at mount_point
1897- will be migrated to the RBD device before being remounted.
1898-
1899- All services listed in system_services will be stopped prior to data
1900- migration and restarted when complete.
1901- """
1902- # Ensure pool, RBD image, RBD mappings are in place.
1903- if not pool_exists(service, pool):
1904- utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
1905- create_pool(service, pool, replicas=rbd_pool_replicas)
1906-
1907- if not rbd_exists(service, pool, rbd_img):
1908- utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
1909- create_rbd_image(service, pool, rbd_img, sizemb)
1910-
1911- if not image_mapped(rbd_img):
1912- utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
1913- map_block_storage(service, pool, rbd_img)
1914-
1915- # make file system
1916- # TODO: What happens if for whatever reason this is run again and
1917- # the data is already in the rbd device and/or is mounted??
1918- # When it is mounted already, it will fail to make the fs
1919- # XXX: This is really sketchy! Need to at least add an fstab entry
1920- # otherwise this hook will blow away existing data if its executed
1921- # after a reboot.
1922- if not filesystem_mounted(mount_point):
1923- make_filesystem(blk_device, fstype)
1924-
1925- for svc in system_services:
1926- if utils.running(svc):
1927- utils.juju_log('INFO',
1928- 'Stopping services %s prior to migrating '
1929- 'data' % svc)
1930- utils.stop(svc)
1931-
1932- place_data_on_ceph(service, blk_device, mount_point, fstype)
1933-
1934- for svc in system_services:
1935- utils.start(svc)
1936
1937=== removed file 'hooks/charmhelpers/lib/cluster_utils.py'
1938--- hooks/charmhelpers/lib/cluster_utils.py 2014-07-09 12:37:36 +0000
1939+++ hooks/charmhelpers/lib/cluster_utils.py 1970-01-01 00:00:00 +0000
1940@@ -1,128 +0,0 @@
1941-#
1942-# Copyright 2012 Canonical Ltd.
1943-#
1944-# This file is sourced from lp:openstack-charm-helpers
1945-#
1946-# Authors:
1947-# James Page <james.page@ubuntu.com>
1948-# Adam Gandelman <adamg@ubuntu.com>
1949-#
1950-
1951-from utils import (
1952- juju_log,
1953- relation_ids,
1954- relation_list,
1955- relation_get,
1956- get_unit_hostname,
1957- config_get)
1958-import subprocess
1959-import os
1960-
1961-
1962-def is_clustered():
1963- for r_id in (relation_ids('ha') or []):
1964- for unit in (relation_list(r_id) or []):
1965- clustered = relation_get('clustered',
1966- rid=r_id,
1967- unit=unit)
1968- if clustered:
1969- return True
1970- return False
1971-
1972-
1973-def is_leader(resource):
1974- cmd = [
1975- "crm", "resource",
1976- "show", resource]
1977- try:
1978- status = subprocess.check_output(cmd)
1979- except subprocess.CalledProcessError:
1980- return False
1981- else:
1982- if get_unit_hostname() in status:
1983- return True
1984- else:
1985- return False
1986-
1987-
1988-def peer_units():
1989- peers = []
1990- for r_id in (relation_ids('cluster') or []):
1991- for unit in (relation_list(r_id) or []):
1992- peers.append(unit)
1993- return peers
1994-
1995-
1996-def oldest_peer(peers):
1997- local_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
1998- for peer in peers:
1999- remote_unit_no = peer.split('/')[1]
2000- if remote_unit_no < local_unit_no:
2001- return False
2002- return True
2003-
2004-
2005-def eligible_leader(resource):
2006- if is_clustered():
2007- if not is_leader(resource):
2008- juju_log('INFO', 'Deferring action to CRM leader.')
2009- return False
2010- else:
2011- peers = peer_units()
2012- if peers and not oldest_peer(peers):
2013- juju_log('INFO', 'Deferring action to oldest service unit.')
2014- return False
2015- return True
2016-
2017-
2018-def https():
2019- '''
2020- Determines whether enough data has been provided in configuration
2021- or relation data to configure HTTPS
2022- .
2023- returns: boolean
2024- '''
2025- if config_get('use-https') == "yes":
2026- return True
2027- if config_get('ssl_cert') and config_get('ssl_key'):
2028- return True
2029- for r_id in relation_ids('identity-service'):
2030- for unit in relation_list(r_id):
2031- if (relation_get('https_keystone', rid=r_id, unit=unit) and
2032- relation_get('ssl_cert', rid=r_id, unit=unit) and
2033- relation_get('ssl_key', rid=r_id, unit=unit) and
2034- relation_get('ca_cert', rid=r_id, unit=unit)):
2035- return True
2036- return False
2037-
2038-
2039-def determine_api_port(public_port):
2040- '''
2041- Determine correct API server listening port based on
2042- existence of HTTPS reverse proxy and/or haproxy.
2043-
2044- public_port: int: standard public port for given service
2045-
2046- returns: int: the correct listening port for the API service
2047- '''
2048- i = 0
2049- if len(peer_units()) > 0 or is_clustered():
2050- i += 1
2051- if https():
2052- i += 1
2053- return public_port - (i * 10)
2054-
2055-
2056-def determine_haproxy_port(public_port):
2057- '''
2058- Description: Determine correct proxy listening port based on public IP +
2059- existence of HTTPS reverse proxy.
2060-
2061- public_port: int: standard public port for given service
2062-
2063- returns: int: the correct listening port for the HAProxy service
2064- '''
2065- i = 0
2066- if https():
2067- i += 1
2068- return public_port - (i * 10)
2069
2070=== removed file 'hooks/charmhelpers/lib/utils.py'
2071--- hooks/charmhelpers/lib/utils.py 2014-07-09 12:37:36 +0000
2072+++ hooks/charmhelpers/lib/utils.py 1970-01-01 00:00:00 +0000
2073@@ -1,221 +0,0 @@
2074-#
2075-# Copyright 2012 Canonical Ltd.
2076-#
2077-# This file is sourced from lp:openstack-charm-helpers
2078-#
2079-# Authors:
2080-# James Page <james.page@ubuntu.com>
2081-# Paul Collins <paul.collins@canonical.com>
2082-# Adam Gandelman <adamg@ubuntu.com>
2083-#
2084-
2085-import json
2086-import os
2087-import subprocess
2088-import socket
2089-import sys
2090-
2091-
2092-def do_hooks(hooks):
2093- hook = os.path.basename(sys.argv[0])
2094-
2095- try:
2096- hook_func = hooks[hook]
2097- except KeyError:
2098- juju_log('INFO',
2099- "This charm doesn't know how to handle '{}'.".format(hook))
2100- else:
2101- hook_func()
2102-
2103-
2104-def install(*pkgs):
2105- cmd = [
2106- 'apt-get',
2107- '-y',
2108- 'install']
2109- for pkg in pkgs:
2110- cmd.append(pkg)
2111- subprocess.check_call(cmd)
2112-
2113-TEMPLATES_DIR = 'templates'
2114-
2115-try:
2116- import jinja2
2117-except ImportError:
2118- install('python-jinja2')
2119- import jinja2
2120-
2121-try:
2122- import dns.resolver
2123-except ImportError:
2124- install('python-dnspython')
2125- import dns.resolver
2126-
2127-
2128-def render_template(template_name, context, template_dir=TEMPLATES_DIR):
2129- templates = jinja2.Environment(loader=jinja2.FileSystemLoader(
2130- template_dir))
2131- template = templates.get_template(template_name)
2132- return template.render(context)
2133-
2134-# Protocols
2135-TCP = 'TCP'
2136-UDP = 'UDP'
2137-
2138-
2139-def expose(port, protocol='TCP'):
2140- cmd = [
2141- 'open-port',
2142- '{}/{}'.format(port, protocol)]
2143- subprocess.check_call(cmd)
2144-
2145-
2146-def juju_log(severity, message):
2147- cmd = [
2148- 'juju-log',
2149- '--log-level', severity,
2150- message]
2151- subprocess.check_call(cmd)
2152-
2153-
2154-def relation_ids(relation):
2155- cmd = [
2156- 'relation-ids',
2157- relation]
2158- result = str(subprocess.check_output(cmd)).split()
2159- if result == "":
2160- return None
2161- else:
2162- return result
2163-
2164-
2165-def relation_list(rid):
2166- cmd = [
2167- 'relation-list',
2168- '-r', rid]
2169- result = str(subprocess.check_output(cmd)).split()
2170- if result == "":
2171- return None
2172- else:
2173- return result
2174-
2175-
2176-def relation_get(attribute, unit=None, rid=None):
2177- cmd = [
2178- 'relation-get']
2179- if rid:
2180- cmd.append('-r')
2181- cmd.append(rid)
2182- cmd.append(attribute)
2183- if unit:
2184- cmd.append(unit)
2185- value = subprocess.check_output(cmd).strip() # IGNORE:E1103
2186- if value == "":
2187- return None
2188- else:
2189- return value
2190-
2191-
2192-def relation_set(**kwargs):
2193- cmd = [
2194- 'relation-set']
2195- args = []
2196- for k, v in kwargs.items():
2197- if k == 'rid':
2198- if v:
2199- cmd.append('-r')
2200- cmd.append(v)
2201- else:
2202- args.append('{}={}'.format(k, v))
2203- cmd += args
2204- subprocess.check_call(cmd)
2205-
2206-
2207-def unit_get(attribute):
2208- cmd = [
2209- 'unit-get',
2210- attribute]
2211- value = subprocess.check_output(cmd).strip() # IGNORE:E1103
2212- if value == "":
2213- return None
2214- else:
2215- return value
2216-
2217-
2218-def config_get(attribute):
2219- cmd = [
2220- 'config-get',
2221- '--format',
2222- 'json']
2223- out = subprocess.check_output(cmd).strip() # IGNORE:E1103
2224- cfg = json.loads(out)
2225-
2226- try:
2227- return cfg[attribute]
2228- except KeyError:
2229- return None
2230-
2231-
2232-def get_unit_hostname():
2233- return socket.gethostname()
2234-
2235-
2236-def get_host_ip(hostname=unit_get('private-address')):
2237- try:
2238- # Test to see if already an IPv4 address
2239- socket.inet_aton(hostname)
2240- return hostname
2241- except socket.error:
2242- answers = dns.resolver.query(hostname, 'A')
2243- if answers:
2244- return answers[0].address
2245- return None
2246-
2247-
2248-def _svc_control(service, action):
2249- subprocess.check_call(['service', service, action])
2250-
2251-
2252-def restart(*services):
2253- for service in services:
2254- _svc_control(service, 'restart')
2255-
2256-
2257-def stop(*services):
2258- for service in services:
2259- _svc_control(service, 'stop')
2260-
2261-
2262-def start(*services):
2263- for service in services:
2264- _svc_control(service, 'start')
2265-
2266-
2267-def reload(*services):
2268- for service in services:
2269- try:
2270- _svc_control(service, 'reload')
2271- except subprocess.CalledProcessError:
2272- # Reload failed - either service does not support reload
2273- # or it was not running - restart will fixup most things
2274- _svc_control(service, 'restart')
2275-
2276-
2277-def running(service):
2278- try:
2279- output = subprocess.check_output(['service', service, 'status'])
2280- except subprocess.CalledProcessError:
2281- return False
2282- else:
2283- if ("start/running" in output or "is running" in output):
2284- return True
2285- else:
2286- return False
2287-
2288-
2289-def is_relation_made(relation, key='private-address'):
2290- for r_id in (relation_ids(relation) or []):
2291- for unit in (relation_list(r_id) or []):
2292- if relation_get(key, rid=r_id, unit=unit):
2293- return True
2294- return False
2295
2296=== removed file 'hooks/charmhelpers/setup.py'
2297--- hooks/charmhelpers/setup.py 2014-07-09 12:37:36 +0000
2298+++ hooks/charmhelpers/setup.py 1970-01-01 00:00:00 +0000
2299@@ -1,12 +0,0 @@
2300-#!/usr/bin/env python
2301-
2302-from distutils.core import setup
2303-
2304-setup(name='charmhelpers',
2305- version='1.0',
2306- description='this is dumb',
2307- author='nobody',
2308- author_email='dummy@amulet',
2309- url='http://google.com',
2310- packages=[],
2311-)
2312
2313=== modified file 'hooks/hdp-hadoop-common.py'
2314--- hooks/hdp-hadoop-common.py 2014-11-21 19:35:29 +0000
2315+++ hooks/hdp-hadoop-common.py 2015-02-13 00:38:49 +0000
2316@@ -1,19 +1,28 @@
2317 #!/usr/bin/env python
2318 import os
2319+import re
2320 import subprocess
2321+import pwd
2322 import sys
2323 import tarfile
2324 import shlex
2325 import shutil
2326 import inspect
2327 import time
2328+from functools import partial
2329+import socket
2330+
2331+import bootstrap
2332+bootstrap.install_charmhelpers()
2333
2334 from hdputils import install_base_pkg, updateHDPDirectoryScript, config_all_nodes, \
2335- setHadoopEnvVar, home, hdpScript, configureJAVA, config_all_nodes
2336-from bdutils import setDirPermission, fileSetKV, append_bashrc, is_jvm_service_active, setHadoopConfigXML, chownRecursive
2337+ setHadoopEnvVar, home, hdpScript, configureJAVA, config_all_nodes, \
2338+ javaPath
2339+from bdutils import setDirPermission, fileSetKV, append_bashrc, is_jvm_service_active, setHadoopConfigXML, \
2340+ chownRecursive, HDFS_command, fconfigured
2341
2342-from charmhelpers.lib.utils import config_get, get_unit_hostname
2343-from shutil import rmtree, copyfile
2344+from shutil import rmtree, copyfile, copy
2345+from charmhelpers.core import hookenv
2346 from charmhelpers.core.hookenv import log, Hooks, relation_get, relation_set, unit_get, open_port, local_unit, related_units
2347 from charmhelpers.core.host import service_start, service_stop, add_user_to_group
2348 from time import sleep
2349@@ -68,27 +77,71 @@
2350 # in config.yaml
2351 def updateDirectoriesScript():
2352 log("==> {}".format(inspect.stack()[0][3]),"INFO")
2353- fileSetKV(directoriesScript, "DFS_NAME_DIR=", "\""+config_get('dfs_name_dir')+"\";")
2354- fileSetKV(directoriesScript, "DFS_DATA_DIR=","\""+ config_get('dfs_data_dir')+"\";")
2355- fileSetKV(directoriesScript, "FS_CHECKPOINT_DIR=", "\""+config_get('fs_checkpoint_dir')+"\";")
2356- fileSetKV(directoriesScript, "YARN_LOCAL_DIR=", "\""+config_get('yarn_local_dir')+"\";")
2357- fileSetKV(directoriesScript, "YARN_LOCAL_LOG_DIR=", "\""+config_get('yarn_local_log_dir')+"\";")
2358- fileSetKV(directoriesScript, "ZOOKEEPER_DATA_DIR=", "\""+config_get('zookeeper_data_dir')+"\";")
2359+ fileSetKV(directoriesScript, "DFS_NAME_DIR=", "\""+hookenv.config('dfs_name_dir')+"\";")
2360+ fileSetKV(directoriesScript, "DFS_DATA_DIR=","\""+ hookenv.config('dfs_data_dir')+"\";")
2361+ fileSetKV(directoriesScript, "FS_CHECKPOINT_DIR=", "\""+hookenv.config('fs_checkpoint_dir')+"\";")
2362+ fileSetKV(directoriesScript, "YARN_LOCAL_DIR=", "\""+hookenv.config('yarn_local_dir')+"\";")
2363+ fileSetKV(directoriesScript, "YARN_LOCAL_LOG_DIR=", "\""+hookenv.config('yarn_local_log_dir')+"\";")
2364+ fileSetKV(directoriesScript, "ZOOKEEPER_DATA_DIR=", "\""+hookenv.config('zookeeper_data_dir')+"\";")
2365
2366
2367 def createHDPHadoopConf():
2368 log("==> {}".format(inspect.stack()[0][3]),"INFO")
2369
2370+ edits = {
2371+ 'hadoop-env.sh': {
2372+ r'TODO-JDK-PATH': javaPath,
2373+ },
2374+ 'yarn-env.sh': {
2375+ r'TODO-JDK-PATH': javaPath,
2376+ },
2377+ }
2378+
2379 HADOOP_CONF_DIR = os.environ["HADOOP_CONF_DIR"]
2380 HDPConfPath = os.path.join(os.path.sep,home, hdpScript, "configuration_files", "core_hadoop")
2381 source = os.listdir(HDPConfPath)
2382 for files in source:
2383 srcFile = os.path.join(os.path.sep, HDPConfPath, files)
2384 desFile = os.path.join(os.path.sep, HADOOP_CONF_DIR, files)
2385- shutil.copyfile(srcFile, desFile)
2386+ edit_file(srcFile, desFile, edits.get(files))
2387 subprocess.call(createDadoopConfDir)
2388
2389
2390+def edit_file(src, dst=None, edits=None):
2391+ """
2392+ Copy file from src to dst, applying one or more per-line edits.
2393+
2394+ :param str src: Path of source file to copy from
2395+ :param str dst: Path of destination file. If None or the same as `src`,
2396+ the edits will be made in-place.
2397+ :param dict edits: A mapping of regular expression patterns to regular
2398+ expression replacement patterns (using backreferences).
2399+ Any matching replacements will be made for each line.
2400+ If the order of replacements is important, an instance
2401+ of the `OrderedDict` class from the `collections`
2402+ module should be used.
2403+ """
2404+ if not edits:
2405+ if not dst or dst == src:
2406+ return # in-place edit with no changes is a noop
2407+ shutil.copyfile(src, dst) # no edits is just a copy
2408+ return
2409+ cleanup = False
2410+ if not dst or dst == src:
2411+ dst = src
2412+ src = '{}.bak'.format(src)
2413+ os.rename(dst, src)
2414+ cleanup = True
2415+ with open(src, 'r') as infile:
2416+ with open(dst, 'w') as outfile:
2417+ for line in infile.readlines():
2418+ for pat, repl in edits.iteritems():
2419+ line = re.sub(pat, repl, line)
2420+ outfile.write(line)
2421+ if cleanup:
2422+ os.remove(src)
2423+
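A quick usage sketch of the new edit_file() helper (illustrative only: the file
paths below are hypothetical, and the JDK path is the javaPath value from
hdputils.py):

    from collections import OrderedDict

    # In-place edit (dst omitted): swap the JDK placeholder for the real path.
    edit_file('/etc/hadoop/conf/hadoop-env.sh',
              edits={r'TODO-JDK-PATH': '/usr/lib/jvm/java-1.7.0-openjdk-amd64'})

    # Copy-with-edits where order matters: an OrderedDict guarantees the first
    # pattern is applied before the second on every line.
    edit_file('hadoop-env.sh.template', '/etc/hadoop/conf/hadoop-env.sh',
              edits=OrderedDict([
                  (r'^#\s*export JAVA_HOME.*', 'export JAVA_HOME=TODO-JDK-PATH'),
                  (r'TODO-JDK-PATH', '/usr/lib/jvm/java-1.7.0-openjdk-amd64'),
              ]))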
2424+
2425 def uninstall_base_pkg():
2426 log("==> {}".format(inspect.stack()[0][3]),"INFO")
2427 packages = ['ntp',
2428@@ -104,7 +157,6 @@
2429 'liblzo2-2',
2430 'liblzo2-dev',
2431 'libhdfs0',
2432- 'libhdfs0-dev',
2433 'hadoop-lzo']
2434 apt_purge(packages)
2435 shutil.rmtree(hdpScriptPath)
2436@@ -130,26 +182,41 @@
2437 setDirPermission(os.environ['MAPRED_PID_DIR'], os.environ['MAPRED_USER'], group, 0755)
2438 setDirPermission(os.environ['YARN_LOCAL_DIR'], os.environ['YARN_USER'], group, 0755)
2439 setDirPermission(os.environ['YARN_LOCAL_LOG_DIR'], os.environ['YARN_USER'], group, 0755)
2440- hdfsConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],'hdfs-site.xml')
2441- setHadoopConfigXML(hdfsConfPath, "dfs.namenode.name.dir", config_get('dfs_name_dir'))
2442- setHadoopConfigXML(hdfsConfPath, "dfs.datanode.data.dir", config_get('dfs_data_dir'))
2443- setHadoopConfigXML(hdfsConfPath, "dfs.namenode.checkpoint.dir", config_get('fs_checkpoint_dir'))
2444+ hdfsConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'], 'hdfs-site.xml')
2445+ setHadoopConfigXML(hdfsConfPath, "dfs.namenode.name.dir", hookenv.config('dfs_name_dir'))
2446+ setHadoopConfigXML(hdfsConfPath, "dfs.datanode.data.dir", hookenv.config('dfs_data_dir'))
2447+ setHadoopConfigXML(hdfsConfPath, "dfs.namenode.checkpoint.dir", hookenv.config('fs_checkpoint_dir'))
2448+ yarnConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'], 'yarn-site.xml')
2449+ setHadoopConfigXML(yarnConfPath, "yarn.scheduler.maximum-allocation-mb", "2048")
2450+ setHadoopConfigXML(yarnConfPath, "yarn.scheduler.minimum-allocation-mb", "682")
2451+ setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.resource.cpu-vcores", "2")
2452+ setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.resource.memory-mb", "2048")
2453+ setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.resource.percentage-physical-cpu-limit", "100")
2454+ mapredConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'], 'mapred-site.xml')
2455+# setHadoopConfigXML(mapredConfPath, "mapreduce.application.framework.path", None)
2456+ setHadoopConfigXML(mapredConfPath, "mapreduce.map.memory.mb", "682")
2457+ setHadoopConfigXML(mapredConfPath, "mapreduce.reduce.memory.mb", "682")
2458+ setHadoopConfigXML(mapredConfPath, "mapreduce.map.java.opts", "-Xmx546m")
2459+ setHadoopConfigXML(mapredConfPath, "mapreduce.reduce.java.opts", "-Xmx546m")
2460+ setHadoopConfigXML(mapredConfPath, "mapreduce.task.io.sort.factor", "100")
2461+ setHadoopConfigXML(mapredConfPath, "mapreduce.task.io.sort.mb", "273")
2462+ setHadoopConfigXML(mapredConfPath, "yarn.app.mapreduce.am.resource.mb", "682")
2463
2464 # candidate for BD charm helper
2465 def format_namenode(hdfsUser):
2466 log("==> hdfs format for user={}".format(hdfsUser),"INFO")
2467- cmd = shlex.split("su {} -c '/usr/lib/hadoop/bin/hadoop namenode -format'".format(hdfsUser))
2468+ cmd = shlex.split("su {} -c '/usr/hdp/current/hadoop-client/bin/hadoop namenode -format'".format(hdfsUser))
2469 subprocess.call(cmd)
2470
2471 def callHDFS_fs(command):
2472- cmd = shlex.split("su hdfs -c '/usr/lib/hadoop/bin/hadoop fs {}'".format(command))
2473+ cmd = shlex.split("su hdfs -c '/usr/hdp/current/hadoop-client/bin/hadoop fs {}'".format(command))
2474 subprocess.call(cmd)
2475
2476
2477 ''' Start Jobhistory Server '''
2478 def start_jobhistory():
2479 log("==> Start Job History Server")
2480- path = os.path.join(os.path.sep, 'usr', 'lib', 'hadoop-yarn', 'bin', 'container-executor')
2481+ path = '/usr/hdp/current/hadoop-yarn-client/bin/container-executor'
2482 chownRecursive(path, 'root', 'hadoop')
2483 os.chmod(path, 650)
2484 callHDFS_fs("-mkdir -p /mr-history/tmp")
2485@@ -161,16 +228,16 @@
2486 callHDFS_fs("-chmod -R 1777 /app-logs ")
2487 callHDFS_fs("-chown yarn /app-logs ")
2488 hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2489- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2490- cmd = shlex.split("su {} -c '/usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config {} start historyserver'".\
2491+ os.environ["HADOOP_LIBEXEC_DIR"] = "/usr/hdp/current/hadoop-client/libexec"
2492+ cmd = shlex.split("su {} -c '/usr/hdp/current/hadoop-mapreduce-client/sbin/mr-jobhistory-daemon.sh --config {} start historyserver'".\
2493 format(os.environ['MAPRED_USER'], hadoopConfDir))
2494 subprocess.call(cmd)
2495
2496 '''Stop Job History Server'''
2497 def stop_jobhistory():
2498 hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2499- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2500- cmd = shlex.split("su {} -c '/usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config {} stop historyserver'".\
2501+ os.environ["HADOOP_LIBEXEC_DIR"] = "/usr/hdp/current/hadoop-client/libexec"
2502+ cmd = shlex.split("su {} -c '/usr/hdp/current/hadoop-mapreduce-client/sbin/mr-jobhistory-daemon.sh --config {} stop historyserver'".\
2503 format(os.environ['MAPRED_USER'], hadoopConfDir))
2504 subprocess.call(cmd)
2505
2506@@ -182,47 +249,49 @@
2507 stop_jobhistory()
2508 start_jobhistory()
2509
2510-''' Start Namenode Service
2511- hdfsUser: System user for Hadoop Distributed Filesystem
2512-'''
2513+
2514+def hdfs_service(service, action, username):
2515+ daemon = '/usr/hdp/current/hadoop-hdfs-{}/../hadoop/sbin/hadoop-daemon.sh'
2516+ log("==> {} {} for user={}".format(action, service, username), "INFO")
2517+ subprocess.check_call(shlex.split(
2518+ "su {user} -c '{daemon} --config {conf_dir} {action} {service}'".format(
2519+ user=username,
2520+ daemon=daemon.format(service),
2521+ service=service,
2522+ action=action,
2523+ conf_dir=os.environ["HADOOP_CONF_DIR"])))
2524+
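For reference, the command the new hdfs_service() helper expands to for a
typical call (a sketch; HADOOP_CONF_DIR shown symbolically):

    # hdfs_service('namenode', 'start', 'hdfs') runs, via subprocess:
    #   su hdfs -c '/usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh
    #       --config $HADOOP_CONF_DIR start namenode'
    # daemon.format(service) fills the {} in the hadoop-hdfs-{} path segment.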
2525+
2526 def start_namenode(hdfsUser):
2527- log("==> start namenode for user={}".format(hdfsUser), "INFO")
2528- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2529- cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} start namenode'".\
2530- format(hdfsUser, hadoopConfDir))
2531- subprocess.check_call(cmd)
2532-
2533-'''Stop Name Node server
2534- hdfsUser: System user for Hadoop Distributed Filesystem
2535-'''
2536+ '''
2537+ Start Namenode Service
2538+ hdfsUser: System user for Hadoop Distributed Filesystem
2539+ '''
2540+ hdfs_service('namenode', 'start', hdfsUser)
2541+
2542+
2543 def stop_namenode(hdfsUser):
2544- log("==> stop namenode for user={}".format(hdfsUser), "INFO")
2545- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2546- cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} stop namenode'".\
2547- format(hdfsUser, hadoopConfDir))
2548- subprocess.call(cmd)
2549-
2550-'''
2551- Start Data Node
2552- hdfsUser: System user for Hadoop Distributed Filesystem
2553-'''
2554+ '''
2555+ Stop Name Node server
2556+ hdfsUser: System user for Hadoop Distributed Filesystem
2557+ '''
2558+ hdfs_service('namenode', 'stop', hdfsUser)
2559+
2560+
2561 def start_datanode(hdfsUser):
2562- log("==> start datanode for user={}".format(hdfsUser), "INFO")
2563- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2564- cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} start datanode'".\
2565- format(hdfsUser, hadoopConfDir))
2566- subprocess.check_call(cmd)
2567-
2568-'''
2569- Stop Data Node
2570- hdfsUser: System user for Hadoop Distributed Filesystem
2571-'''
2572+ '''
2573+ Start Data Node
2574+ hdfsUser: System user for Hadoop Distributed Filesystem
2575+ '''
2576+ hdfs_service('datanode', 'start', hdfsUser)
2577+
2578+
2579 def stop_datanode(hdfsUser):
2580- log("==> stop datanode for user={}".format(hdfsUser), "INFO")
2581- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2582- cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} stop datanode'".\
2583- format(hdfsUser, hadoopConfDir))
2584- subprocess.call(cmd)
2585+ '''
2586+ Stop Data Node
2587+ hdfsUser: System user for Hadoop Distributed Filesystem
2588+ '''
2589+ hdfs_service('datanode', 'stop', hdfsUser)
2590 # candidate for BD charm helper
2591
2592 '''
2593@@ -238,52 +307,49 @@
2594 setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.resource-tracker.address", RMhostname+":8025")
2595 setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.scheduler.address", RMhostname+":8030")
2596 setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.address", RMhostname+":8050")
2597+ setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.hostname", RMhostname)
2598 setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.admin.address", RMhostname+":8141")
2599- setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.local-dirs", config_get('yarn_local_dir'))
2600- setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.log-dirs", config_get('yarn_local_log_dir'))
2601+ setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.local-dirs", hookenv.config('yarn_local_dir'))
2602+ setHadoopConfigXML(yarnConfPath, "yarn.nodemanager.log-dirs", hookenv.config('yarn_local_log_dir'))
2603 setHadoopConfigXML(yarnConfPath, "yarn.log.server.url","http://"+RMhostname+":19888")
2604 setHadoopConfigXML(yarnConfPath, "yarn.resourcemanager.webapp.address", RMhostname+":8088")
2605 #jobhistory server
2606 setHadoopConfigXML(mapConfDir, "mapreduce.jobhistory.webapp.address", RMhostname+":19888")
2607 setHadoopConfigXML(mapConfDir, "mapreduce.jobhistory.address", RMhostname+":10020")
2608
2609-''' Start Resource Manager server '''
2610+
2611+def yarn_service(service, action, username):
2612+ daemon = '/usr/hdp/current/hadoop-yarn-client/sbin/yarn-daemon.sh'
2613+ log("==> {} {} for user={}".format(action, service, username), "INFO")
2614+ os.environ["HADOOP_LIBEXEC_DIR"] = "/usr/hdp/current/hadoop-client/libexec"
2615+ subprocess.check_call(shlex.split(
2616+ "su {user} -c '{daemon} --config {conf_dir} {action} {service}'".format(
2617+ user=username,
2618+ daemon=daemon.format(service),
2619+ service=service,
2620+ action=action,
2621+ conf_dir=os.environ["HADOOP_CONF_DIR"])))
2622+
2623+
2624 def start_resourcemanager(yarnUser):
2625- log("==> start resourcemanager", "INFO")
2626- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2627- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2628- cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} start resourcemanager'".\
2629- format(yarnUser, hadoopConfDir))
2630- subprocess.call(cmd)
2631-
2632-''' Stop Resource Manager server '''
2633+ ''' Start Resource Manager server '''
2634+ yarn_service('resourcemanager', 'start', yarnUser)
2635+
2636+
2637 def stop_resourcemanager(yarnUser):
2638- log("==> stop resourcemanager", "INFO")
2639- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2640- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2641- cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} stop resourcemanager'".\
2642- format(yarnUser, hadoopConfDir))
2643- subprocess.check_call(cmd)
2644-
2645-''' Start Node Manager daemon on each compute node '''
2646+ ''' Stop Resource Manager server '''
2647+ yarn_service('resourcemanager', 'stop', yarnUser)
2648+
2649+
2650 def start_nodemanager(yarnUser):
2651- log("==> start nodemanager", "INFO")
2652- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2653- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2654- cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} start nodemanager'".\
2655- format(yarnUser, hadoopConfDir))
2656- subprocess.check_call(cmd)
2657-
2658-
2659-''' Stop Node Manager daemon on each compute node '''
2660+ ''' Start Node Manager daemon on each compute node '''
2661+ yarn_service('nodemanager', 'start', yarnUser)
2662+
2663
2664 def stop_nodemanager(yarnUser):
2665- log("==> stop nodemanager", "INFO")
2666- hadoopConfDir = os.environ["HADOOP_CONF_DIR"]
2667- os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec"
2668- cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} stop nodemanager'".\
2669- format(yarnUser, hadoopConfDir))
2670- subprocess.call(cmd)
2671+ ''' Stop Node Manager daemon on each compute node '''
2672+ yarn_service('nodemanager', 'stop', yarnUser)
2673+
2674
2675 '''Stop all running hadoop services
2676 NOTE: Order is important - DO NOT CHANGE
2677@@ -336,6 +402,8 @@
2678 break
2679
2680 def configureHDFS(hostname):
2681+ cmd = "/usr/bin/hdp-select set all 2.2.0.0-2041"
2682+ subprocess.check_call(shlex.split(cmd))
2683 hdfsConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],'hdfs-site.xml')
2684 coreConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],'core-site.xml')
2685 setHadoopConfigXML(coreConfPath, "fs.defaultFS", "hdfs://"+hostname+":8020")
2686@@ -361,11 +429,10 @@
2687 'liblzo2-2',
2688 'liblzo2-dev',
2689 'libhdfs0',
2690- 'libhdfs0-dev',
2691 'hadoop-lzo']
2692 install_base_pkg(packages)
2693 config_hadoop_nodes()
2694- fileSetKV(hosts_path, unit_get('private-address')+' ', get_unit_hostname())
2695+ fileSetKV(hosts_path, unit_get('private-address')+' ', socket.gethostname())
2696
2697
2698 @hooks.hook('resourcemanager-relation-joined')
2699@@ -373,17 +440,23 @@
2700 log ("==> resourcemanager-relation-joined","INFO")
2701 if is_jvm_service_active("ResourceManager"):
2702 relation_set(resourceManagerReady=True)
2703- relation_set(resourceManager_hostname=get_unit_hostname())
2704+ relation_set(resourceManager_hostname=socket.gethostname())
2705 return
2706+ # Wait for the namenode; note that this check will not work on a distributed HDFS deployment.
2707 if not is_jvm_service_active("NameNode"):
2708 sys.exit(0)
2709+ shutil.copy(os.path.join(os.path.sep, os.environ['CHARM_DIR'],\
2710+ 'files', 'scripts', "terasort.sh"), home)
2711 setHadoopEnvVar()
2712 relation_set(resourceManager_ip=unit_get('private-address'))
2713- relation_set(resourceManager_hostname=get_unit_hostname())
2714+ relation_set(resourceManager_hostname=socket.gethostname())
2715 configureYarn(unit_get('private-address'))
2716 start_resourcemanager(os.environ["YARN_USER"])
2717 start_jobhistory()
2718 open_port(8025)
2719+ open_port(8050)
2720+ open_port(8020)
2721+ open_port(8042)
2722 open_port(8030)
2723 open_port(8050)
2724 open_port(8141)
2725@@ -414,7 +487,9 @@
2726 open_port(19888)
2727 open_port(8088)
2728 open_port(10020)
2729- relation_set(nodemanager_hostname=get_unit_hostname())
2730+ open_port(8042)
2731+
2732+ relation_set(nodemanager_hostname=socket.gethostname())
2733
2734
2735 @hooks.hook('resourcemanager-relation-broken')
2736@@ -437,25 +512,32 @@
2737
2738 if is_jvm_service_active("NameNode"):
2739 relation_set(nameNodeReady=True)
2740- relation_set(namenode_hostname=get_unit_hostname())
2741+ relation_set(namenode_hostname=socket.gethostname())
2742 return
2743 setHadoopEnvVar()
2744 setDirPermission(os.environ['DFS_NAME_DIR'], os.environ['HDFS_USER'], os.environ['HADOOP_GROUP'], 0755)
2745- relation_set(namenode_hostname=get_unit_hostname())
2746+ relation_set(namenode_hostname=socket.gethostname())
2747 configureHDFS(unit_get('private-address'))
2748 format_namenode(os.environ["HDFS_USER"])
2749 start_namenode(os.environ["HDFS_USER"])
2750+ # Namenode might not be ready yet
2751+ sleep(15)
2752+ # Create home directories for hdfs (superuser) and ubuntu (default user)
2753+ HDFS_command("dfs -mkdir -p /user/hdfs")
2754+ HDFS_command("dfs -mkdir -p /user/ubuntu")
2755+ HDFS_command("dfs -chown ubuntu /user/ubuntu")
2756+ HDFS_command("dfs -chmod -R 755 /user/ubuntu")
2757 start_jobhistory()
2758 open_port(8020)
2759 open_port(8010)
2760 open_port(50070)
2761 open_port(50075)
2762 open_port(8480)
2763- open_port(50470)
2764+ open_port(50070)
2765+ open_port(45454)
2766 if not is_jvm_service_active("NameNode"):
2767 log("error ==> NameNode failed to start")
2768 sys.exit(1)
2769- sleep(5)
2770 relation_set(nameNodeReady=True)
2771
2772
2773@@ -480,7 +562,7 @@
2774 open_port(8480)
2775 open_port(50010)
2776 open_port(50075)
2777- relation_set(datanode_hostname = get_unit_hostname())
2778+ relation_set(datanode_hostname = socket.gethostname())
2779
2780
2781 @hooks.hook('config-changed')
2782@@ -499,6 +581,15 @@
2783 sys.exit(0)
2784 log("Configuring namenode - changed phase", "INFO")
2785 fileSetKV(hosts_path, relation_get('private-address')+' ', datanode_host)
2786+
2787+ # Upload the MapReduce tarball to HDFS.
2788+ if not fconfigured("mapred_init"):
2789+ HDFS_command("dfs -mkdir -p /hdp/apps/2.2.0.0-2041/mapreduce/")
2790+ HDFS_command("dfs -put /usr/hdp/current/hadoop-client/mapreduce.tar.gz /hdp/apps/2.2.0.0-2041/mapreduce/")
2791+ HDFS_command("dfs -chown -R hdfs:hadoop /hdp")
2792+ HDFS_command("dfs -chmod -R 555 /hdp/apps/2.2.0.0-2041/mapreduce")
2793+ HDFS_command("dfs -chmod -R 444 /hdp/apps/2.2.0.0-2041/mapreduce/mapreduce.tar.gz")
2794+
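The fconfigured() guard above (imported from hooks/bdutils.py elsewhere in this
diff) makes the tarball upload a run-once step. A minimal sketch of the
flag-file pattern such a helper typically implements; this body is an
assumption for illustration, not the actual bdutils code:

    import os

    def fconfigured(name, flag_dir='/var/lib/hdp-flags'):  # flag_dir is hypothetical
        """Return True if `name` was handled before; else record it and return False."""
        flag = os.path.join(flag_dir, name)
        if os.path.exists(flag):
            return True
        if not os.path.isdir(flag_dir):
            os.makedirs(flag_dir)
        open(flag, 'w').close()  # touch the flag so later hook runs skip the step
        return False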
2795
2796 @hooks.hook('start')
2797 def start():
2798@@ -525,14 +616,14 @@
2799
2800 @hooks.hook('compute-nodes-relation-joined')
2801 def compute_nodes_relation_joined():
2802- log("==> compute_nodes_relation_joined {}".format(get_unit_hostname()),"INFO")
2803- relation_set(hostname=get_unit_hostname())
2804+ log("==> compute_nodes_relation_joined {}".format(socket.gethostname()),"INFO")
2805+ relation_set(hostname=socket.gethostname())
2806
2807
2808 @hooks.hook('hadoop-nodes-relation-joined')
2809 def hadoop_nodes_relation_joined():
2810- log("==> hadoop_nodes_relation_joined {}".format(get_unit_hostname()),"INFO")
2811- relation_set(hostname=get_unit_hostname())
2812+ log("==> hadoop_nodes_relation_joined {}".format(socket.gethostname()),"INFO")
2813+ relation_set(hostname=socket.gethostname())
2814
2815
2816
2817@@ -555,7 +646,5 @@
2818 resourceManagerReady = False
2819
2820
2821-
2822-
2823 if __name__ == "__main__":
2824 hooks.execute(sys.argv)
2825
2826=== modified file 'hooks/hdputils.py'
2827--- hooks/hdputils.py 2014-11-21 18:37:23 +0000
2828+++ hooks/hdputils.py 2015-02-13 00:38:49 +0000
2829@@ -17,24 +17,27 @@
2830
2831 def install_base_pkg(packages):
2832 log ("==> install_base_pkg", "INFO")
2833- wgetPkg("http://public-repo-1.hortonworks.com/HDP/ubuntu12/2.1.3.0/hdp.list -O /etc/apt/sources.list.d/hdp.list","")
2834- cmd =gpg_script
2835+ wgetPkg("http://public-repo-1.hortonworks.com/HDP/ubuntu12/2.x/GA/2.2.0.0/hdp.list",
2836+ "/etc/apt/sources.list.d/hdp.list")
2837+ cmd = gpg_script
2838 subprocess.call(cmd)
2839 apt_update()
2840 apt_install(packages)
2841 if not os.path.isdir(os.path.join(os.path.sep,'usr','lib', 'hadoop')):
2842 log("Error, apt-get install Hadoop failed", "ERROR")
2843 sys.exit(1)
2844- os.chdir(home);
2845- wgetPkg("http://public-repo-1.hortonworks.com/HDP/tools/2.1.1.0/hdp_manual_install_rpm_helper_files-2.1.1.385.tar.gz","")
2846- if tarfile.is_tarfile(tarfilename):
2847- tball = tarfile.open(tarfilename)
2848+ os.chdir(home)
2849+ wgetPkg("http://public-repo-1.hortonworks.com/HDP/tools/2.2.0.0/hdp_manual_install_rpm_helper_files-2.2.0.0.2041.tar.gz",
2850+ tarfilename)
2851+ if not tarfile.is_tarfile(tarfilename):
2852+ log('Unable to extract Hadoop Tarball', 'ERROR')
2853+ sys.exit(1)
2854+ with tarfile.open(tarfilename) as tball:
2855+ extracted_dirname = tball.next().name
2856 tball.extractall(home)
2857- else:
2858- log ("Unable to extract Hadoop Tarball ", "ERROR")
2859 if os.path.isdir(hdpScript):
2860 shutil.rmtree(hdpScript)
2861- os.rename(tarfilenamePre, hdpScript)
2862+ os.rename(extracted_dirname, hdpScript)
2863 log("<== install_base_pkg", "INFO")
2864
2865 def uninstall_base_pkg(packages):
2866@@ -116,10 +119,9 @@
2867 ################ Global values #########################
2868 home = os.path.join(os.path.sep, "home", "ubuntu")
2869 javaPath = "/usr/lib/jvm/java-1.7.0-openjdk-amd64"
2870-tarfilename="hdp_manual_install_rpm_helper_files-2.1.1.385.tar.gz"
2871-tarfilenamePre="hdp_manual_install_rpm_helper_files-2.1.1.385"
2872+tarfilename="/tmp/hdp-helpers.tar.gz"
2873 HDP_PGP_SCRIPT = 'gpg_ubuntu.sh'
2874-gpg_script = os.path.join(os.path.sep, os.environ['CHARM_DIR'], os.path.sep, os.environ['CHARM_DIR'], 'files', 'scripts',HDP_PGP_SCRIPT)
2875+gpg_script = os.path.join(os.environ['CHARM_DIR'], 'files', 'scripts', HDP_PGP_SCRIPT)
2876 hdpScript = "hdp_scripts"
2877 hdpScriptPath = os.path.join(os.path.sep,home, hdpScript,'scripts')
2878 usersAndGroupsScript = os.path.join(os.path.sep, hdpScriptPath, "usersAndGroups.sh")
2879
2880=== added file 'resources.yaml'
2881--- resources.yaml 1970-01-01 00:00:00 +0000
2882+++ resources.yaml 2015-02-13 00:38:49 +0000
2883@@ -0,0 +1,11 @@
2884+resources:
2885+ pathlib:
2886+ pypi: path.py>=7.0
2887+ pyaml:
2888+ pypi: pyaml
2889+ six:
2890+ pypi: six
2891+ charmhelpers:
2892+ pypi: http://bazaar.launchpad.net/~bigdata-dev/bigdata-data/trunk/download/cory.johns%40canonical.com-20150212185722-5i0c57g7725yw71r/charmhelpers0.2.2.ta-20150203190143-yrb1xsbp2bffbyp5-1/charmhelpers-0.2.2.tar.gz
2893+ hash: 4a6daacc352f7381b38e3efffe760f6c43a15452bdaf66e50f234ea425b91873
2894+ hash_type: sha256
2895
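For context on how resources.yaml is consumed (see the bootstrap.install_charmhelpers()
call added in the hdp-hadoop-common.py hunk above): a hedged sketch of the
fetch-verify-install flow; the jujuresources function names and signatures here
are assumptions, not verified against the bundled library:

    import jujuresources

    # Verify the sha256-pinned charmhelpers resource declared in resources.yaml,
    # fetch it if missing or corrupt, then install it (names/signatures assumed).
    if not jujuresources.verify('charmhelpers'):
        jujuresources.fetch('charmhelpers')
    jujuresources.install('charmhelpers')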
2896=== modified file 'tests/01-hadoop-cluster-deployment-1.py'
2897--- tests/01-hadoop-cluster-deployment-1.py 2014-11-21 18:37:23 +0000
2898+++ tests/01-hadoop-cluster-deployment-1.py 2015-02-13 00:38:49 +0000
2899@@ -1,141 +1,85 @@
2900 #!/usr/bin/python3
2901
2902+import unittest
2903 import amulet
2904 import yaml
2905 import os
2906-class TestDeployment(object):
2907- def __init__(self):
2908- self.d = amulet.Deployment(series='trusty')
2909- bpath = os.path.join(os.path.dirname( __file__), "hadoop_cluster.yaml")
2910+
2911+
2912+class TestDeployment(unittest.TestCase):
2913+ @classmethod
2914+ def setUpClass(cls):
2915+ cls.d = amulet.Deployment(series='trusty')
2916+ bpath = os.path.join(os.path.dirname(__file__), "hadoop_cluster.yaml")
2917 with open(bpath) as f:
2918 bun = f.read()
2919- self.d.load(yaml.safe_load(bun))
2920+ cls.d.load(yaml.safe_load(bun))
2921 try:
2922- self.d.setup(timeout=9000)
2923- self.d.sentry.wait()
2924+ cls.d.setup(timeout=9000)
2925+ cls.d.sentry.wait()
2926 except amulet.helpers.TimeoutError:
2927 amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time")
2928 except:
2929 raise
2930- self.master_unit = self.d.sentry.unit['yarn-hdfs-master/0']
2931- self.compute_unit = self.d.sentry.unit['compute-node/0']
2932+ cls.master_unit = cls.d.sentry.unit['yarn-hdfs-master/0']
2933+ cls.compute_unit = cls.d.sentry.unit['compute-node/0']
2934
2935- def run(self):
2936- for test in dir(self):
2937- if test.startswith('test_'):
2938- getattr(self, test)()
2939-############################################################
2940-# Validate hadoop services on master node have been started
2941-############################################################
2942 def test_hadoop_master_service_status(self):
2943- o,c= self.master_unit.run("jps | awk '{print $2}'")
2944- if o.find('ResourceManager') == -1:
2945- amulet.raise_status(amulet.FAIL, msg="ResourceManager not started")
2946- else:
2947- amulet.raise_status(amulet.PASS, msg="ResourceManager started")
2948- if o.find('JobHistoryServer') == -1:
2949- amulet.raise_status(amulet.FAIL, msg="JobHistoryServer not started")
2950- else:
2951- amulet.raise_status(amulet.PASS, msg="JobHistoryServer started")
2952- if o.find('NameNode') == -1:
2953- amulet.raise_status(amulet.FAIL, msg="NameNode not started")
2954- else:
2955- amulet.raise_status(amulet.PASS, msg="NameNode started")
2956+ """
2957+ Validate hadoop services on master node have been started
2958+ """
2959+ output, retcode = self.master_unit.run("pgrep -a java")
2960+ assert 'ResourceManager' in output, "ResourceManager not started"
2961+ assert 'JobHistoryServer' in output, "JobHistoryServer not started"
2962+ assert 'NameNode' in output, "NameNode not started"
2963
2964-############################################################
2965-# Validate hadoop services on compute node have been started
2966-############################################################
2967 def test_hadoop_compute_service_status(self):
2968- o,c= self.compute_unit.run("jps | awk '{print $2}'")
2969- if o.find('NodeManager') == -1:
2970- amulet.raise_status(amulet.FAIL, msg="NodeManager not started")
2971- else:
2972- amulet.raise_status(amulet.PASS, msg="NodeManager started")
2973- if o.find('DataNode') == -1:
2974- amulet.raise_status(amulet.FAIL, msg="DataServer not started")
2975- else:
2976- amulet.raise_status(amulet.PASS, msg="DataServer started")
2977-###########################################################################
2978-# Validate admin few hadoop activities on HDFS cluster.
2979-###########################################################################
2980-# 1) This test validates mkdir on hdfs cluster
2981-###########################################################################
2982- def test_hdfs_mkdir(self):
2983- o,c= slef.master_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/ubuntu'")
2984- if c == 0:
2985- amulet.raise_status(amulet.PASS, msg=" Successfully created a user directory on hdfs")
2986- else:
2987- amulet.raise_status(amulet.FAIL, msg=" Created a user directory on hdfs FAILED")
2988-###########################################################################
2989-# 2) This test validates change hdfs dir owner on the cluster
2990-###########################################################################
2991- def test_hdfs_dir_owner(self):
2992- o,c= self.master_unit.run("su hdfs -c 'hdfs dfs -chown ubuntu:ubuntu /user/ubuntu'")
2993- if c == 0:
2994- amulet.raise_status(amulet.PASS, msg=" Successfully assigned an owner to directory on hdfs")
2995- else:
2996- amulet.raise_status(amulet.FAIL, msg=" assigning an owner to hdfs directory FAILED")
2997-
2998-###########################################################################
2999-# 3) This test validates setting hdfs directory access permission on the cluster
3000-###########################################################################
3001- def test_hdfs_dir_permission(self):
3002- o,c= self.master_unit.run("su hdfs -c 'hdfs dfs -chmod -R 755 /user/ubuntu'")
3003- if c == 0:
3004- amulet.raise_status(amulet.PASS, msg=" Successfully set directory permission on hdfs")
3005- else:
3006- amulet.raise_status(amulet.FAIL, msg=" seting directory permission on hdfs FAILED")
3007-
3008-###########################################################################
3009-# Validate yarn mapreduce operation
3010-# 1) validate mapreduce execution - writing to hdfs
3011-# 2) validate successful mapreduce operation after the execution
3012-###########################################################################
3013- def test_yarn_mapreduce_exe1(self):
3014- o,c= self.master_unit.run("su ubuntu -c 'hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-*.jar teragen 10000 /user/ubuntu/teragenout'")
3015- if c == 0:
3016- amulet.raise_status(amulet.PASS, msg=" Successfull execution of teragen mapreduce app")
3017- else:
3018- amulet.raise_status(amulet.FAIL, msg=" teragen FAILED")
3019-###########################################################################
3020-# 2) validate successful mapreduce operation after the execution
3021-###########################################################################
3022- o,c= self.master_unit.run("su hdfs -c 'hdfs dfs -ls /user/ubuntu/teragenout/_SUCCESS'")
3023- if c == 0:
3024- amulet.raise_status(amulet.PASS, msg="mapreduce operation was success")
3025- else:
3026- amulet.raise_status(amulet.FAIL, msg="mapreduce operation was not a success")
3027-
3028-###########################################################################
3029-# Validate yarn mapreduce operation
3030-# 1) validate mapreduce execution - reading and writing to hdfs
3031-# 2) validate successful mapreduce operation after the execution
3032-###########################################################################
3033- def test_yarn_mapreduce_exe2(self):
3034- o,c= self.master_unit.run("su ubuntu -c 'hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-*.jar terasort /user/ubuntu/teragenout /user/ubuntu/terasortout'")
3035- if c == 0:
3036- amulet.raise_status(amulet.PASS, msg=" Successfull execution of terasort mapreduce app")
3037- else:
3038- amulet.raise_status(amulet.FAIL, msg=" terasort FAILED")
3039-###########################################################################
3040-# 2) validate a successful mapreduce operation after the execution
3041-###########################################################################
3042- o,c= self.master_unit.run("su hdfs -c 'hdfs dfs -ls /user/ubuntu/terasortout/_SUCCESS'")
3043- if c == 0:
3044- amulet.raise_status(amulet.PASS, msg="mapreduce operation was success")
3045- else:
3046- amulet.raise_status(amulet.FAIL, msg="mapreduce operation was not a success")
3047-###########################################################################
3048-# validate a successful deletion of mapreduce operation result from hdfs
3049-###########################################################################
3050- def test_hdfs_delete_dir(self):
3051- o,c= self.master_unit.run("su hdfs -c 'hdfs dfs -rm -r /user/ubuntu/teragenout'")
3052- if c == 0:
3053- amulet.raise_status(amulet.PASS, msg="mapreduce result deleted from hdfs cluster")
3054- else:
3055- amulet.raise_status(amulet.FAIL, msg="mapreduce was not deleted from hdfs cluster")
3056+ """
3057+ Validate hadoop services on compute node have been started
3058+ """
3059+ output, retcode = self.compute_unit.run("pgrep -a java")
3060+ assert 'NodeManager' in output, "NodeManager not started"
3061+ assert 'DataNode' in output, "DataNode not started"
3062+
3063+ def test_hdfs_dir(self):
3064+ """
3065+ Validate a few hadoop admin activities on the HDFS cluster:
3066+ 1) creating a directory on the hdfs cluster
3067+ 2) changing the owner of an hdfs directory
3068+ 3) setting hdfs directory access permissions
3069+
3070+ NB: These are order-dependent, so must be done as part of a single test case.
3071+ """
3072+ output, retcode = self.master_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/ubuntu'")
3073+ assert retcode == 0, "Creating a user directory on hdfs FAILED:\n{}".format(output)
3074+ output, retcode = self.master_unit.run("su hdfs -c 'hdfs dfs -chown ubuntu:ubuntu /user/ubuntu'")
3075+ assert retcode == 0, "Assigning an owner to hdfs directory FAILED:\n{}".format(output)
3076+ output, retcode = self.master_unit.run("su hdfs -c 'hdfs dfs -chmod -R 755 /user/ubuntu'")
3077+ assert retcode == 0, "Setting directory permission on hdfs FAILED:\n{}".format(output)
3078+
3079+ def test_yarn_mapreduce_exe(self):
3080+ """
3081+ Validate yarn mapreduce operations:
3082+ 1) validate mapreduce execution - writing to hdfs
3083+ 2) validate successful mapreduce operation after the execution
3084+ 3) validate mapreduce execution - reading and writing to hdfs
3085+ 4) validate successful mapreduce operation after the execution
3086+ 5) validate successful deletion of mapreduce operation result from hdfs
3087+
3088+ NB: These are order-dependent, so must be done as part of a single test case.
3089+ """
3090+ jar_file = '/usr/hdp/current/hadoop-mapreduce-client/hadoop-mapreduce-examples.jar'
3091+ test_steps = [
3092+ ('teragen', "su ubuntu -c 'hadoop jar {} teragen 10000 /user/ubuntu/teragenout'".format(jar_file)),
3093+ ('mapreduce #1', "su hdfs -c 'hdfs dfs -ls /user/ubuntu/teragenout/_SUCCESS'"),
3094+ ('terasort', "su ubuntu -c 'hadoop jar {} terasort /user/ubuntu/teragenout /user/ubuntu/terasortout'".format(jar_file)),
3095+ ('mapreduce #2', "su hdfs -c 'hdfs dfs -ls /user/ubuntu/terasortout/_SUCCESS'"),
3096+ ('cleanup', "su hdfs -c 'hdfs dfs -rm -r /user/ubuntu/teragenout'"),
3097+ ]
3098+ for name, step in test_steps:
3099+ output, retcode = self.master_unit.run(step)
3100+ assert retcode == 0, "{} FAILED:\n{}".format(name, output)
3101
3102
3103 if __name__ == '__main__':
3104- runner = TestDeployment()
3105- runner.run()
3106+ unittest.main()
