Merge lp:~chad.smith/charms/precise/storage/storage-ec2-support into lp:~dpb/charms/precise/storage/trunk

Proposed by Chad Smith
Status: Merged
Merged at revision: 32
Proposed branch: lp:~chad.smith/charms/precise/storage/storage-ec2-support
Merge into: lp:~dpb/charms/precise/storage/trunk
Diff against target: 223 lines (+151/-7)
2 files modified
hooks/common_util.py (+32/-4)
hooks/test_common_util.py (+119/-3)
To merge this branch: bzr merge lp:~chad.smith/charms/precise/storage/storage-ec2-support
Reviewer Review Type Date Requested Status
David Britton Approve
Fernando Correa Neto (community) Approve
Review via email: mp+206834@code.launchpad.net

Description of the change

Allow get_instance_id to work on both OpenStack and EC2 instances.

To do this, we first attempt a curl of OS_METADATA_URL to pull the instance uuid, which is used by the nova commands for volume attachment.

If the curl of OS_METADATA_URL fails to return valid JSON, we then curl EC2_METADATA_URL to grab the EC2 instance id.

This branch adds missing unit tests for get_instance_id as well.
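
In other words, json.loads() doubles as the provider check: the OpenStack endpoint returns JSON containing a uuid, while the EC2 endpoint returns a bare instance id that fails to parse. A small illustration of that distinction (the payloads below are made up for the example, not captured from a real metadata service):

    import json

    # Illustrative payloads only -- not taken from a real metadata service
    openstack_body = '{"uuid": "9f2c6a3e-0000-0000-0000-000000000000"}'
    ec2_body = "i-0abc1234\n"

    print(json.loads(openstack_body)["uuid"])  # OpenStack: JSON with a "uuid" key
    try:
        json.loads(ec2_body)                   # EC2: plain text, not JSON
    except ValueError:
        print(ec2_body.strip())                # fall back to the raw instance id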

35. By Chad Smith

docstring fix

36. By Chad Smith

support attached devices that are already partitioned, such as old postgresql charm upgrade_charm path

Revision history for this message
Fernando Correa Neto (fcorrea) wrote :

Code looks good but we have an issue because in order to mount an existing volume, the BSB service must be running on the same id.
That means that we must know upfront in which EC2 AZ the volume is, and if possible, deploy the BSB service in the same AZ so it can be successfully attached.

review: Approve
Revision history for this message
David Britton (dpb) wrote :

On Tue, Mar 04, 2014 at 09:43:16AM -0000, Fernando Correa Neto wrote:
> Review: Approve
>
> Code looks good but we have an issue because in order to mount an
> existing volume, the BSB service must be running on the same id. That
> means that we must know upfront in which EC2 AZ the volume is, and if
> possible, deploy the BSB service in the same AZ so it can be
> successfully attached.

Should the AZ be specified in the BSB config?

--
David Britton <email address hidden>

Revision history for this message
David Britton (dpb) wrote :

[0] Can we get rid of the 'else' clauses (two of them) in the get_instance_id() method? Just return directly from the try block?

Otherwise, +1.

review: Approve
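
For reference, the simplification suggested above (returning straight from the try block rather than using else clauses) would look roughly like the sketch below. This is only an illustration against the diff that follows, not the code as merged, and it assumes the module-level imports and the log() helper already present in hooks/common_util.py:

    def get_instance_id():
        """Return the OpenStack uuid, or the EC2 instance-id as a fallback."""
        try:
            metadata = json.loads(
                subprocess.check_output("curl %s" % OS_METADATA_URL, shell=True))
            return metadata["uuid"]  # return directly from the try block
        except ValueError:
            pass  # not OpenStack metadata; fall through to the EC2 lookup
        try:
            return subprocess.check_output(
                "curl %s" % EC2_METADATA_URL, shell=True).strip()
        except subprocess.CalledProcessError as e:
            log("Error: couldn't discover ec2 instance id. %s" % str(e),
                hookenv.ERROR)
            sys.exit(1)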

Preview Diff

=== modified file 'hooks/common_util.py'
--- hooks/common_util.py 2014-02-13 00:41:13 +0000
+++ hooks/common_util.py 2014-02-25 23:09:49 +0000
@@ -9,7 +9,8 @@

 CHARM_DIR = os.environ.get("CHARM_DIR")
 FSTAB_FILE = "/etc/fstab"
-METADATA_URL = "http://169.254.169.254/openstack/2012-08-10/meta_data.json"
+EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/instance-id"
+OS_METADATA_URL = "http://169.254.169.254/openstack/2012-08-10/meta_data.json"
 PERSIST_DATA_FILE = "charm-data"


@@ -17,6 +18,7 @@
     """Quaint little log wrapper for juju logging"""
     hookenv.log(message, level)

+
 # XXX We can remove _persist_data when juju bug:1279018 is fixed to provide
 # relation-data from *-relation-departed hook runs
 def _persist_data(key, value):
@@ -262,6 +264,14 @@
         sys.exit(0)

     _assert_block_device(device_path)
+    if os.path.exists("%s1" % device_path):
+        # Then we are supporting upgrade-charm path for deprecated
+        # postgresql volume-map config parameter. Postgresql used to
+        # partition the volume into a single partition at 100% of the
+        # volume size, but the storage charm doesn't do that partitioning
+        device_path = "%s1" % device_path
+        log("Mounting %s as the device path as the root device is already "
+            "partitioned." % device_path)
     _format_device(device_path)
     _set_label(device_path, mountpoint)
     fstype = subprocess.check_output(
@@ -288,6 +298,24 @@


 def get_instance_id():
-    metadata = json.loads(
-        subprocess.check_output("curl %s" % METADATA_URL, shell=True))
-    return metadata["uuid"]
+    """
+    Curl metadata for openstack from C{OS_METADATA_URL} to obtain the C{uuid}.
+    If that fails, we are likely on EC2 and curl the C{EC2_METADATA_URL} to get
+    the EC2 C{instance-id}.
+    """
+    try:
+        metadata = json.loads(
+            subprocess.check_output("curl %s" % OS_METADATA_URL, shell=True))
+    except ValueError:
+        pass  # We are probably on EC2, try below
+    else:
+        return metadata["uuid"]
+    try:
+        metadata = subprocess.check_output(
+            "curl %s" % EC2_METADATA_URL, shell=True)
+    except subprocess.CalledProcessError, e:
+        log("Error: couldn't discover ec2 instance id. %s" % str(e),
+            hookenv.ERROR)
+        sys.exit(1)
+    else:
+        return metadata.strip()

=== modified file 'hooks/test_common_util.py'
--- hooks/test_common_util.py 2014-02-13 00:41:13 +0000
+++ hooks/test_common_util.py 2014-02-25 23:09:49 +0000
@@ -140,6 +140,58 @@
         message = "Error: no provider defined in storage charm config"
         self.assertTrue(util.hookenv.is_logged(message), "Error not logged")

+    def test_get_instance_id_when_openstack(self):
+        """
+        L{get_instance_id} will attempt a curl for openstack metadata to
+        extract the C{uuid} as the instance_id from openstack JSON metadata.
+        """
+        os_metadata = {
+            "uuid": "i-123123"
+        }
+        curl = self.mocker.replace(subprocess.check_output)
+        curl("curl %s" % util.OS_METADATA_URL, shell=True)
+        self.mocker.result(json.dumps(os_metadata))
+        self.mocker.replay()
+        self.assertEqual(util.get_instance_id(), "i-123123")
+
+    def test_get_instance_id_when_ec2(self):
+        """
+        L{get_instance_id} will first attempt a curl for openstack metadata.
+        When that fails to be valid json, because we are on an EC2 instance,
+        it will curl the C{EC2_METADATA_URL} to grab the C{instance-id}.
+        """
+        instance_id = "i-123123"
+        curl = self.mocker.replace(subprocess.check_output)
+        curl("curl %s" % util.OS_METADATA_URL, shell=True)
+        self.mocker.throw(ValueError("invalid JSON"))
+        curl("curl %s" % util.EC2_METADATA_URL, shell=True)
+        self.mocker.result(instance_id)
+        self.mocker.replay()
+        self.assertEqual(util.get_instance_id(), instance_id)
+
+    def test_get_instance_id_fails_on_ec2(self):
+        """
+        When L{get_instance_id} fails to curl C{EC2_METADATA_URL} it will log
+        the error and exit in error.
+        """
+        ec2_command = "curl %s" % util.EC2_METADATA_URL
+
+        curl = self.mocker.replace(subprocess.check_output)
+        curl("curl %s" % util.OS_METADATA_URL, shell=True)
+        self.mocker.throw(ValueError("invalid JSON"))
+        curl(ec2_command, shell=True)
+        self.mocker.throw(
+            subprocess.CalledProcessError(
+                1, "curl %s" % util.EC2_METADATA_URL))
+        self.mocker.replay()
+        result = self.assertRaises(SystemExit, util.get_instance_id)
+        self.assertEqual(result.code, 1)
+        message = (
+            "Error: couldn't discover ec2 instance id. Command '%s' returned "
+            "non-zero exit status 1" % ec2_command)
+        self.assertTrue(
+            util.hookenv.is_logged(message), "Not logged- %s" % message)
+
     def test_is_mounted_when_mountpoint_is_mounted(self):
         """
         L{is_mounted} returns C{True} when the mountpath is mounted.
@@ -499,9 +551,8 @@
         with open(persist_path) as infile:
             data = json.load(infile)
         self.assertEqual(data["mountpoint"], "/mnt/this")
-

-    def test_mount_volume_mountpoint_from_relation_data(self):
+    def test_mount_volume_mountpoint_from_relation_data_already_mounted(self):
         """
         L{mount_volume} will log a message and return when the C{mountpoint} is
         already defined in relation data and the requested C{mountpoint} is
@@ -647,6 +698,9 @@

         assert_device = self.mocker.replace(util._assert_block_device)
         assert_device("/dev/vdx")
+        exists = self.mocker.replace(os.path.exists)
+        exists("/dev/vdx1")
+        self.mocker.result(False)
         create_partition = self.mocker.replace(
             util._format_device)
         create_partition("/dev/vdx")
@@ -659,7 +713,6 @@
         get_fstype("blkid -o value -s TYPE /dev/vdx", shell=True)
         self.mocker.result("ext4\n")

-        exists = self.mocker.replace(os.path.exists)
         exists("/mnt/this")
         self.mocker.result(True)

@@ -682,3 +735,66 @@
         self.assertEqual(
             util.hookenv._outgoing_relation_data,
             (("mountpoint", "/mnt/this"),))
+
+    def test_mount_volume_bsb_provider_success_postgresql_charm_upgrade(self):
+        """
+        Old versions of the postgresql charm would have created a single
+        partition with max size from an external attached volume if the user
+        had used postgresql charm's deprecated volume-map parameter. When the
+        storage charm sees a single partition on the attached device, it will
+        attempt to mount that partition instead of the root device.
+        The success of the mount will log a message and the storage charm will
+        call relation-set C{mountpoint} to report the successful mount to the
+        principal.
+        """
+        util.hookenv._incoming_relation_data = (
+            ("data:1", "mountpoint", "/mnt/this"),)
+
+        util.hookenv._config = (("provider", "block-storage-broker"),)
+
+        ismount = self.mocker.replace(os.path.ismount)
+        ismount("/mnt/this")
+        self.mocker.result(False)  # Mock not mounted
+
+        assert_device = self.mocker.replace(util._assert_block_device)
+        assert_device("/dev/vdx")
+        exists = self.mocker.replace(os.path.exists)
+        exists("/dev/vdx1")
+        self.mocker.result(True)
+        create_partition = self.mocker.replace(
+            util._format_device)
+        create_partition("/dev/vdx1")
+
+        _set_label = self.mocker.replace(util._set_label)
+        _set_label("/dev/vdx1", "/mnt/this")
+
+        # Ensure blkid is called to discover fstype of nova device
+        get_fstype = self.mocker.replace(subprocess.check_output)
+        get_fstype("blkid -o value -s TYPE /dev/vdx1", shell=True)
+        self.mocker.result("ext4\n")
+
+        exists("/mnt/this")
+        self.mocker.result(True)
+
+        mount = self.mocker.replace(subprocess.check_call)
+        mount("mount -t ext4 /dev/vdx1 /mnt/this", shell=True)
+        self.mocker.replay()
+
+        self.assertEqual(util.get_provider(), "block-storage-broker")
+        util.mount_volume("/dev/vdx")
+        messages = ["Mount (/mnt/this) successful",
+                    "Mounting /dev/vdx1 as the device path as the root device "
+                    "is already partitioned."]
+        for message in messages:
+            self.assertTrue(
+                util.hookenv.is_logged(message), "Not logged- %s" % message)
+
+        # Wrote proper fstab mount info to mount on reboot
+        fstab_content = "/dev/vdx1 /mnt/this ext4 default 0 0"
+        with open(util.FSTAB_FILE) as input_file:
+            self.assertEqual(input_file.read(), fstab_content)
+
+        # Reported success to principal
+        self.assertEqual(
+            util.hookenv._outgoing_relation_data,
+            (("mountpoint", "/mnt/this"),))
