Merge lp:~eday/nova/pep8-fixes-other into lp:~hudson-openstack/nova/trunk
- pep8-fixes-other
- Merge into trunk
Proposed by
Eric Day
Status: | Merged |
---|---|
Approved by: | Eric Day |
Approved revision: | 379 |
Merged at revision: | 379 |
Proposed branch: | lp:~eday/nova/pep8-fixes-other |
Merge into: | lp:~hudson-openstack/nova/trunk |
Prerequisite: | lp:~eday/nova/pep8-fixes-db |
Diff against target: |
1598 lines (+274/-236) 24 files modified
nova/auth/dbdriver.py (+27/-24) nova/auth/fakeldap.py (+5/-6) nova/auth/ldapdriver.py (+7/-5) nova/auth/manager.py (+1/-1) nova/cloudpipe/pipelib.py (+19/-11) nova/compute/disk.py (+7/-8) nova/compute/monitor.py (+52/-61) nova/compute/power_state.py (+6/-7) nova/image/service.py (+9/-8) nova/image/services/glance/__init__.py (+4/-4) nova/network/linux_net.py (+7/-3) nova/network/manager.py (+2/-3) nova/objectstore/bucket.py (+12/-8) nova/objectstore/handler.py (+12/-9) nova/objectstore/image.py (+23/-18) nova/objectstore/stored.py (+2/-2) nova/scheduler/driver.py (+2/-0) nova/scheduler/manager.py (+2/-1) nova/scheduler/simple.py (+1/-0) nova/virt/fake.py (+1/-0) nova/virt/images.py (+2/-2) nova/virt/libvirt_conn.py (+56/-41) nova/virt/xenapi.py (+15/-13) nova/volume/driver.py (+0/-1) |
To merge this branch: | bzr merge lp:~eday/nova/pep8-fixes-other |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jay Pipes (community) | Approve | ||
Vish Ishaya (community) | Approve | ||
Review via email:
|
Commit message
Description of the change
Another PEP8 cleanup branch for nova/*; it should be merged after lp:~eday/nova/pep8-fixes-db.
To post a comment you must log in.
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Jay Pipes (jaypipes) wrote : | # |
194 -import string # pylint: disable-msg=W0402
195 +import string # pylint: disable-msg=W0402
114 -SCOPE_ONELEVEL = 1 # not implemented
115 +SCOPE_ONELEVEL = 1 # Not implemented
Not sure what the "fix" is for the above comments — is there a PEP8 requirement for more than one space between a line of code and an inline comment?
Other than that, looks good.
review:
Needs Information
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Eric Day (eday) wrote : | # |
Yup, you get a message like:
bin/nova-
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Jay Pipes (jaypipes) wrote : | # |
Heh, interesting. :)
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'nova/auth/dbdriver.py' | |||
2 | --- nova/auth/dbdriver.py 2010-10-14 05:18:01 +0000 | |||
3 | +++ nova/auth/dbdriver.py 2010-10-22 00:19:41 +0000 | |||
4 | @@ -47,19 +47,23 @@ | |||
5 | 47 | 47 | ||
6 | 48 | def get_user(self, uid): | 48 | def get_user(self, uid): |
7 | 49 | """Retrieve user by id""" | 49 | """Retrieve user by id""" |
9 | 50 | return self._db_user_to_auth_user(db.user_get(context.get_admin_context(), uid)) | 50 | user = db.user_get(context.get_admin_context(), uid) |
10 | 51 | return self._db_user_to_auth_user(user) | ||
11 | 51 | 52 | ||
12 | 52 | def get_user_from_access_key(self, access): | 53 | def get_user_from_access_key(self, access): |
13 | 53 | """Retrieve user by access key""" | 54 | """Retrieve user by access key""" |
15 | 54 | return self._db_user_to_auth_user(db.user_get_by_access_key(context.get_admin_context(), access)) | 55 | user = db.user_get_by_access_key(context.get_admin_context(), access) |
16 | 56 | return self._db_user_to_auth_user(user) | ||
17 | 55 | 57 | ||
18 | 56 | def get_project(self, pid): | 58 | def get_project(self, pid): |
19 | 57 | """Retrieve project by id""" | 59 | """Retrieve project by id""" |
21 | 58 | return self._db_project_to_auth_projectuser(db.project_get(context.get_admin_context(), pid)) | 60 | project = db.project_get(context.get_admin_context(), pid) |
22 | 61 | return self._db_project_to_auth_projectuser(project) | ||
23 | 59 | 62 | ||
24 | 60 | def get_users(self): | 63 | def get_users(self): |
25 | 61 | """Retrieve list of users""" | 64 | """Retrieve list of users""" |
27 | 62 | return [self._db_user_to_auth_user(user) for user in db.user_get_all(context.get_admin_context())] | 65 | return [self._db_user_to_auth_user(user) |
28 | 66 | for user in db.user_get_all(context.get_admin_context())] | ||
29 | 63 | 67 | ||
30 | 64 | def get_projects(self, uid=None): | 68 | def get_projects(self, uid=None): |
31 | 65 | """Retrieve list of projects""" | 69 | """Retrieve list of projects""" |
32 | @@ -71,11 +75,10 @@ | |||
33 | 71 | 75 | ||
34 | 72 | def create_user(self, name, access_key, secret_key, is_admin): | 76 | def create_user(self, name, access_key, secret_key, is_admin): |
35 | 73 | """Create a user""" | 77 | """Create a user""" |
41 | 74 | values = { 'id' : name, | 78 | values = {'id': name, |
42 | 75 | 'access_key' : access_key, | 79 | 'access_key': access_key, |
43 | 76 | 'secret_key' : secret_key, | 80 | 'secret_key': secret_key, |
44 | 77 | 'is_admin' : is_admin | 81 | 'is_admin': is_admin} |
40 | 78 | } | ||
45 | 79 | try: | 82 | try: |
46 | 80 | user_ref = db.user_create(context.get_admin_context(), values) | 83 | user_ref = db.user_create(context.get_admin_context(), values) |
47 | 81 | return self._db_user_to_auth_user(user_ref) | 84 | return self._db_user_to_auth_user(user_ref) |
48 | @@ -83,18 +86,19 @@ | |||
49 | 83 | raise exception.Duplicate('User %s already exists' % name) | 86 | raise exception.Duplicate('User %s already exists' % name) |
50 | 84 | 87 | ||
51 | 85 | def _db_user_to_auth_user(self, user_ref): | 88 | def _db_user_to_auth_user(self, user_ref): |
57 | 86 | return { 'id' : user_ref['id'], | 89 | return {'id': user_ref['id'], |
58 | 87 | 'name' : user_ref['id'], | 90 | 'name': user_ref['id'], |
59 | 88 | 'access' : user_ref['access_key'], | 91 | 'access': user_ref['access_key'], |
60 | 89 | 'secret' : user_ref['secret_key'], | 92 | 'secret': user_ref['secret_key'], |
61 | 90 | 'admin' : user_ref['is_admin'] } | 93 | 'admin': user_ref['is_admin']} |
62 | 91 | 94 | ||
63 | 92 | def _db_project_to_auth_projectuser(self, project_ref): | 95 | def _db_project_to_auth_projectuser(self, project_ref): |
69 | 93 | return { 'id' : project_ref['id'], | 96 | member_ids = [member['id'] for member in project_ref['members']] |
70 | 94 | 'name' : project_ref['name'], | 97 | return {'id': project_ref['id'], |
71 | 95 | 'project_manager_id' : project_ref['project_manager'], | 98 | 'name': project_ref['name'], |
72 | 96 | 'description' : project_ref['description'], | 99 | 'project_manager_id': project_ref['project_manager'], |
73 | 97 | 'member_ids' : [member['id'] for member in project_ref['members']] } | 100 | 'description': project_ref['description'], |
74 | 101 | 'member_ids': member_ids} | ||
75 | 98 | 102 | ||
76 | 99 | def create_project(self, name, manager_uid, | 103 | def create_project(self, name, manager_uid, |
77 | 100 | description=None, member_uids=None): | 104 | description=None, member_uids=None): |
78 | @@ -121,10 +125,10 @@ | |||
79 | 121 | % member_uid) | 125 | % member_uid) |
80 | 122 | members.add(member) | 126 | members.add(member) |
81 | 123 | 127 | ||
86 | 124 | values = { 'id' : name, | 128 | values = {'id': name, |
87 | 125 | 'name' : name, | 129 | 'name': name, |
88 | 126 | 'project_manager' : manager['id'], | 130 | 'project_manager': manager['id'], |
89 | 127 | 'description': description } | 131 | 'description': description} |
90 | 128 | 132 | ||
91 | 129 | try: | 133 | try: |
92 | 130 | project = db.project_create(context.get_admin_context(), values) | 134 | project = db.project_create(context.get_admin_context(), values) |
93 | @@ -244,4 +248,3 @@ | |||
94 | 244 | if not project: | 248 | if not project: |
95 | 245 | raise exception.NotFound('Project "%s" not found' % project_id) | 249 | raise exception.NotFound('Project "%s" not found' % project_id) |
96 | 246 | return user, project | 250 | return user, project |
97 | 247 | |||
98 | 248 | 251 | ||
99 | === modified file 'nova/auth/fakeldap.py' | |||
100 | --- nova/auth/fakeldap.py 2010-10-14 13:07:37 +0000 | |||
101 | +++ nova/auth/fakeldap.py 2010-10-22 00:19:41 +0000 | |||
102 | @@ -35,6 +35,7 @@ | |||
103 | 35 | 'Port that redis is running on.') | 35 | 'Port that redis is running on.') |
104 | 36 | flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') | 36 | flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') |
105 | 37 | 37 | ||
106 | 38 | |||
107 | 38 | class Redis(object): | 39 | class Redis(object): |
108 | 39 | def __init__(self): | 40 | def __init__(self): |
109 | 40 | if hasattr(self.__class__, '_instance'): | 41 | if hasattr(self.__class__, '_instance'): |
110 | @@ -51,19 +52,19 @@ | |||
111 | 51 | 52 | ||
112 | 52 | 53 | ||
113 | 53 | SCOPE_BASE = 0 | 54 | SCOPE_BASE = 0 |
115 | 54 | SCOPE_ONELEVEL = 1 # not implemented | 55 | SCOPE_ONELEVEL = 1 # Not implemented |
116 | 55 | SCOPE_SUBTREE = 2 | 56 | SCOPE_SUBTREE = 2 |
117 | 56 | MOD_ADD = 0 | 57 | MOD_ADD = 0 |
118 | 57 | MOD_DELETE = 1 | 58 | MOD_DELETE = 1 |
119 | 58 | MOD_REPLACE = 2 | 59 | MOD_REPLACE = 2 |
120 | 59 | 60 | ||
121 | 60 | 61 | ||
123 | 61 | class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 | 62 | class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 |
124 | 62 | """Duplicate exception class from real LDAP module.""" | 63 | """Duplicate exception class from real LDAP module.""" |
125 | 63 | pass | 64 | pass |
126 | 64 | 65 | ||
127 | 65 | 66 | ||
129 | 66 | class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 | 67 | class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 |
130 | 67 | """Duplicate exception class from real LDAP module.""" | 68 | """Duplicate exception class from real LDAP module.""" |
131 | 68 | pass | 69 | pass |
132 | 69 | 70 | ||
133 | @@ -251,8 +252,6 @@ | |||
134 | 251 | return objects | 252 | return objects |
135 | 252 | 253 | ||
136 | 253 | @property | 254 | @property |
138 | 254 | def __redis_prefix(self): # pylint: disable-msg=R0201 | 255 | def __redis_prefix(self): # pylint: disable-msg=R0201 |
139 | 255 | """Get the prefix to use for all redis keys.""" | 256 | """Get the prefix to use for all redis keys.""" |
140 | 256 | return 'ldap:' | 257 | return 'ldap:' |
141 | 257 | |||
142 | 258 | |||
143 | 259 | 258 | ||
144 | === modified file 'nova/auth/ldapdriver.py' | |||
145 | --- nova/auth/ldapdriver.py 2010-09-25 03:32:00 +0000 | |||
146 | +++ nova/auth/ldapdriver.py 2010-10-22 00:19:41 +0000 | |||
147 | @@ -294,24 +294,26 @@ | |||
148 | 294 | 294 | ||
149 | 295 | def __find_dns(self, dn, query=None, scope=None): | 295 | def __find_dns(self, dn, query=None, scope=None): |
150 | 296 | """Find dns by query""" | 296 | """Find dns by query""" |
152 | 297 | if scope is None: # one of the flags is 0!! | 297 | if scope is None: |
153 | 298 | # One of the flags is 0! | ||
154 | 298 | scope = self.ldap.SCOPE_SUBTREE | 299 | scope = self.ldap.SCOPE_SUBTREE |
155 | 299 | try: | 300 | try: |
156 | 300 | res = self.conn.search_s(dn, scope, query) | 301 | res = self.conn.search_s(dn, scope, query) |
157 | 301 | except self.ldap.NO_SUCH_OBJECT: | 302 | except self.ldap.NO_SUCH_OBJECT: |
158 | 302 | return [] | 303 | return [] |
160 | 303 | # just return the DNs | 304 | # Just return the DNs |
161 | 304 | return [dn for dn, _attributes in res] | 305 | return [dn for dn, _attributes in res] |
162 | 305 | 306 | ||
163 | 306 | def __find_objects(self, dn, query=None, scope=None): | 307 | def __find_objects(self, dn, query=None, scope=None): |
164 | 307 | """Find objects by query""" | 308 | """Find objects by query""" |
166 | 308 | if scope is None: # one of the flags is 0!! | 309 | if scope is None: |
167 | 310 | # One of the flags is 0! | ||
168 | 309 | scope = self.ldap.SCOPE_SUBTREE | 311 | scope = self.ldap.SCOPE_SUBTREE |
169 | 310 | try: | 312 | try: |
170 | 311 | res = self.conn.search_s(dn, scope, query) | 313 | res = self.conn.search_s(dn, scope, query) |
171 | 312 | except self.ldap.NO_SUCH_OBJECT: | 314 | except self.ldap.NO_SUCH_OBJECT: |
172 | 313 | return [] | 315 | return [] |
174 | 314 | # just return the attributes | 316 | # Just return the attributes |
175 | 315 | return [attributes for dn, attributes in res] | 317 | return [attributes for dn, attributes in res] |
176 | 316 | 318 | ||
177 | 317 | def __find_role_dns(self, tree): | 319 | def __find_role_dns(self, tree): |
178 | @@ -480,6 +482,6 @@ | |||
179 | 480 | class FakeLdapDriver(LdapDriver): | 482 | class FakeLdapDriver(LdapDriver): |
180 | 481 | """Fake Ldap Auth driver""" | 483 | """Fake Ldap Auth driver""" |
181 | 482 | 484 | ||
183 | 483 | def __init__(self): # pylint: disable-msg=W0231 | 485 | def __init__(self): # pylint: disable-msg=W0231 |
184 | 484 | __import__('nova.auth.fakeldap') | 486 | __import__('nova.auth.fakeldap') |
185 | 485 | self.ldap = sys.modules['nova.auth.fakeldap'] | 487 | self.ldap = sys.modules['nova.auth.fakeldap'] |
186 | 486 | 488 | ||
187 | === modified file 'nova/auth/manager.py' | |||
188 | --- nova/auth/manager.py 2010-10-15 15:18:40 +0000 | |||
189 | +++ nova/auth/manager.py 2010-10-22 00:19:41 +0000 | |||
190 | @@ -23,7 +23,7 @@ | |||
191 | 23 | import logging | 23 | import logging |
192 | 24 | import os | 24 | import os |
193 | 25 | import shutil | 25 | import shutil |
195 | 26 | import string # pylint: disable-msg=W0402 | 26 | import string # pylint: disable-msg=W0402 |
196 | 27 | import tempfile | 27 | import tempfile |
197 | 28 | import uuid | 28 | import uuid |
198 | 29 | import zipfile | 29 | import zipfile |
199 | 30 | 30 | ||
200 | === modified file 'nova/cloudpipe/pipelib.py' | |||
201 | --- nova/cloudpipe/pipelib.py 2010-10-01 12:57:17 +0000 | |||
202 | +++ nova/cloudpipe/pipelib.py 2010-10-22 00:19:41 +0000 | |||
203 | @@ -49,7 +49,7 @@ | |||
204 | 49 | self.manager = manager.AuthManager() | 49 | self.manager = manager.AuthManager() |
205 | 50 | 50 | ||
206 | 51 | def launch_vpn_instance(self, project_id): | 51 | def launch_vpn_instance(self, project_id): |
208 | 52 | logging.debug( "Launching VPN for %s" % (project_id)) | 52 | logging.debug("Launching VPN for %s" % (project_id)) |
209 | 53 | project = self.manager.get_project(project_id) | 53 | project = self.manager.get_project(project_id) |
210 | 54 | # Make a payload.zip | 54 | # Make a payload.zip |
211 | 55 | tmpfolder = tempfile.mkdtemp() | 55 | tmpfolder = tempfile.mkdtemp() |
212 | @@ -57,16 +57,18 @@ | |||
213 | 57 | zippath = os.path.join(tmpfolder, filename) | 57 | zippath = os.path.join(tmpfolder, filename) |
214 | 58 | z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED) | 58 | z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED) |
215 | 59 | 59 | ||
217 | 60 | z.write(FLAGS.boot_script_template,'autorun.sh') | 60 | z.write(FLAGS.boot_script_template, 'autorun.sh') |
218 | 61 | z.close() | 61 | z.close() |
219 | 62 | 62 | ||
220 | 63 | key_name = self.setup_key_pair(project.project_manager_id, project_id) | 63 | key_name = self.setup_key_pair(project.project_manager_id, project_id) |
221 | 64 | zippy = open(zippath, "r") | 64 | zippy = open(zippath, "r") |
223 | 65 | context = context.RequestContext(user=project.project_manager, project=project) | 65 | context = context.RequestContext(user=project.project_manager, |
224 | 66 | project=project) | ||
225 | 66 | 67 | ||
226 | 67 | reservation = self.controller.run_instances(context, | 68 | reservation = self.controller.run_instances(context, |
229 | 68 | # run instances expects encoded userdata, it is decoded in the get_metadata_call | 69 | # Run instances expects encoded userdata, it is decoded in the |
230 | 69 | # autorun.sh also decodes the zip file, hence the double encoding | 70 | # get_metadata_call. autorun.sh also decodes the zip file, hence |
231 | 71 | # the double encoding. | ||
232 | 70 | user_data=zippy.read().encode("base64").encode("base64"), | 72 | user_data=zippy.read().encode("base64").encode("base64"), |
233 | 71 | max_count=1, | 73 | max_count=1, |
234 | 72 | min_count=1, | 74 | min_count=1, |
235 | @@ -79,12 +81,14 @@ | |||
236 | 79 | def setup_key_pair(self, user_id, project_id): | 81 | def setup_key_pair(self, user_id, project_id): |
237 | 80 | key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix) | 82 | key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix) |
238 | 81 | try: | 83 | try: |
240 | 82 | private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name) | 84 | private_key, fingerprint = self.manager.generate_key_pair(user_id, |
241 | 85 | key_name) | ||
242 | 83 | try: | 86 | try: |
243 | 84 | key_dir = os.path.join(FLAGS.keys_path, user_id) | 87 | key_dir = os.path.join(FLAGS.keys_path, user_id) |
244 | 85 | if not os.path.exists(key_dir): | 88 | if not os.path.exists(key_dir): |
245 | 86 | os.makedirs(key_dir) | 89 | os.makedirs(key_dir) |
247 | 87 | with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f: | 90 | file_name = os.path.join(key_dir, '%s.pem' % key_name) |
248 | 91 | with open(file_name, 'w') as f: | ||
249 | 88 | f.write(private_key) | 92 | f.write(private_key) |
250 | 89 | except: | 93 | except: |
251 | 90 | pass | 94 | pass |
252 | @@ -95,9 +99,13 @@ | |||
253 | 95 | # def setup_secgroups(self, username): | 99 | # def setup_secgroups(self, username): |
254 | 96 | # conn = self.euca.connection_for(username) | 100 | # conn = self.euca.connection_for(username) |
255 | 97 | # try: | 101 | # try: |
260 | 98 | # secgroup = conn.create_security_group("vpn-secgroup", "vpn-secgroup") | 102 | # secgroup = conn.create_security_group("vpn-secgroup", |
261 | 99 | # secgroup.authorize(ip_protocol = "udp", from_port = "1194", to_port = "1194", cidr_ip = "0.0.0.0/0") | 103 | # "vpn-secgroup") |
262 | 100 | # secgroup.authorize(ip_protocol = "tcp", from_port = "80", to_port = "80", cidr_ip = "0.0.0.0/0") | 104 | # secgroup.authorize(ip_protocol = "udp", from_port = "1194", |
263 | 101 | # secgroup.authorize(ip_protocol = "tcp", from_port = "22", to_port = "22", cidr_ip = "0.0.0.0/0") | 105 | # to_port = "1194", cidr_ip = "0.0.0.0/0") |
264 | 106 | # secgroup.authorize(ip_protocol = "tcp", from_port = "80", | ||
265 | 107 | # to_port = "80", cidr_ip = "0.0.0.0/0") | ||
266 | 108 | # secgroup.authorize(ip_protocol = "tcp", from_port = "22", | ||
267 | 109 | # to_port = "22", cidr_ip = "0.0.0.0/0") | ||
268 | 102 | # except: | 110 | # except: |
269 | 103 | # pass | 111 | # pass |
270 | 104 | 112 | ||
271 | === modified file 'nova/compute/disk.py' | |||
272 | --- nova/compute/disk.py 2010-10-18 20:40:03 +0000 | |||
273 | +++ nova/compute/disk.py 2010-10-22 00:19:41 +0000 | |||
274 | @@ -72,12 +72,12 @@ | |||
275 | 72 | " by sector size: %d / %d", local_bytes, sector_size) | 72 | " by sector size: %d / %d", local_bytes, sector_size) |
276 | 73 | local_sectors = local_bytes / sector_size | 73 | local_sectors = local_bytes / sector_size |
277 | 74 | 74 | ||
284 | 75 | mbr_last = 62 # a | 75 | mbr_last = 62 # a |
285 | 76 | primary_first = mbr_last + 1 # b | 76 | primary_first = mbr_last + 1 # b |
286 | 77 | primary_last = primary_first + primary_sectors - 1 # c | 77 | primary_last = primary_first + primary_sectors - 1 # c |
287 | 78 | local_first = primary_last + 1 # d | 78 | local_first = primary_last + 1 # d |
288 | 79 | local_last = local_first + local_sectors - 1 # e | 79 | local_last = local_first + local_sectors - 1 # e |
289 | 80 | last_sector = local_last # e | 80 | last_sector = local_last # e |
290 | 81 | 81 | ||
291 | 82 | # create an empty file | 82 | # create an empty file |
292 | 83 | yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' | 83 | yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' |
293 | @@ -157,7 +157,7 @@ | |||
294 | 157 | @defer.inlineCallbacks | 157 | @defer.inlineCallbacks |
295 | 158 | def _inject_key_into_fs(key, fs, execute=None): | 158 | def _inject_key_into_fs(key, fs, execute=None): |
296 | 159 | sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') | 159 | sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') |
298 | 160 | yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter | 160 | yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter |
299 | 161 | yield execute('sudo chown root %s' % sshdir) | 161 | yield execute('sudo chown root %s' % sshdir) |
300 | 162 | yield execute('sudo chmod 700 %s' % sshdir) | 162 | yield execute('sudo chmod 700 %s' % sshdir) |
301 | 163 | keyfile = os.path.join(sshdir, 'authorized_keys') | 163 | keyfile = os.path.join(sshdir, 'authorized_keys') |
302 | @@ -169,4 +169,3 @@ | |||
303 | 169 | netfile = os.path.join(os.path.join(os.path.join( | 169 | netfile = os.path.join(os.path.join(os.path.join( |
304 | 170 | fs, 'etc'), 'network'), 'interfaces') | 170 | fs, 'etc'), 'network'), 'interfaces') |
305 | 171 | yield execute('sudo tee %s' % netfile, net) | 171 | yield execute('sudo tee %s' % netfile, net) |
306 | 172 | |||
307 | 173 | 172 | ||
308 | === modified file 'nova/compute/monitor.py' | |||
309 | --- nova/compute/monitor.py 2010-08-16 12:16:21 +0000 | |||
310 | +++ nova/compute/monitor.py 2010-10-22 00:19:41 +0000 | |||
311 | @@ -85,8 +85,7 @@ | |||
312 | 85 | 'RRA:MAX:0.5:6:800', | 85 | 'RRA:MAX:0.5:6:800', |
313 | 86 | 'RRA:MAX:0.5:24:800', | 86 | 'RRA:MAX:0.5:24:800', |
314 | 87 | 'RRA:MAX:0.5:444:800', | 87 | 'RRA:MAX:0.5:444:800', |
317 | 88 | ] | 88 | ]} |
316 | 89 | } | ||
318 | 90 | 89 | ||
319 | 91 | 90 | ||
320 | 92 | utcnow = datetime.datetime.utcnow | 91 | utcnow = datetime.datetime.utcnow |
321 | @@ -97,15 +96,12 @@ | |||
322 | 97 | Updates the specified RRD file. | 96 | Updates the specified RRD file. |
323 | 98 | """ | 97 | """ |
324 | 99 | filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name) | 98 | filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name) |
326 | 100 | 99 | ||
327 | 101 | if not os.path.exists(filename): | 100 | if not os.path.exists(filename): |
328 | 102 | init_rrd(instance, name) | 101 | init_rrd(instance, name) |
330 | 103 | 102 | ||
331 | 104 | timestamp = int(time.mktime(utcnow().timetuple())) | 103 | timestamp = int(time.mktime(utcnow().timetuple())) |
336 | 105 | rrdtool.update ( | 104 | rrdtool.update(filename, '%d:%s' % (timestamp, data)) |
333 | 106 | filename, | ||
334 | 107 | '%d:%s' % (timestamp, data) | ||
335 | 108 | ) | ||
337 | 109 | 105 | ||
338 | 110 | 106 | ||
339 | 111 | def init_rrd(instance, name): | 107 | def init_rrd(instance, name): |
340 | @@ -113,29 +109,28 @@ | |||
341 | 113 | Initializes the specified RRD file. | 109 | Initializes the specified RRD file. |
342 | 114 | """ | 110 | """ |
343 | 115 | path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id) | 111 | path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id) |
345 | 116 | 112 | ||
346 | 117 | if not os.path.exists(path): | 113 | if not os.path.exists(path): |
347 | 118 | os.makedirs(path) | 114 | os.makedirs(path) |
349 | 119 | 115 | ||
350 | 120 | filename = os.path.join(path, '%s.rrd' % name) | 116 | filename = os.path.join(path, '%s.rrd' % name) |
352 | 121 | 117 | ||
353 | 122 | if not os.path.exists(filename): | 118 | if not os.path.exists(filename): |
355 | 123 | rrdtool.create ( | 119 | rrdtool.create( |
356 | 124 | filename, | 120 | filename, |
357 | 125 | '--step', '%d' % FLAGS.monitoring_instances_step, | 121 | '--step', '%d' % FLAGS.monitoring_instances_step, |
358 | 126 | '--start', '0', | 122 | '--start', '0', |
363 | 127 | *RRD_VALUES[name] | 123 | *RRD_VALUES[name]) |
364 | 128 | ) | 124 | |
365 | 129 | 125 | ||
362 | 130 | |||
366 | 131 | def graph_cpu(instance, duration): | 126 | def graph_cpu(instance, duration): |
367 | 132 | """ | 127 | """ |
368 | 133 | Creates a graph of cpu usage for the specified instance and duration. | 128 | Creates a graph of cpu usage for the specified instance and duration. |
369 | 134 | """ | 129 | """ |
370 | 135 | path = instance.get_rrd_path() | 130 | path = instance.get_rrd_path() |
371 | 136 | filename = os.path.join(path, 'cpu-%s.png' % duration) | 131 | filename = os.path.join(path, 'cpu-%s.png' % duration) |
374 | 137 | 132 | ||
375 | 138 | rrdtool.graph ( | 133 | rrdtool.graph( |
376 | 139 | filename, | 134 | filename, |
377 | 140 | '--disable-rrdtool-tag', | 135 | '--disable-rrdtool-tag', |
378 | 141 | '--imgformat', 'PNG', | 136 | '--imgformat', 'PNG', |
379 | @@ -146,9 +141,8 @@ | |||
380 | 146 | '-l', '0', | 141 | '-l', '0', |
381 | 147 | '-u', '100', | 142 | '-u', '100', |
382 | 148 | 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'), | 143 | 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'), |
386 | 149 | 'AREA:cpu#eacc00:% CPU', | 144 | 'AREA:cpu#eacc00:% CPU',) |
387 | 150 | ) | 145 | |
385 | 151 | |||
388 | 152 | store_graph(instance.instance_id, filename) | 146 | store_graph(instance.instance_id, filename) |
389 | 153 | 147 | ||
390 | 154 | 148 | ||
391 | @@ -158,8 +152,8 @@ | |||
392 | 158 | """ | 152 | """ |
393 | 159 | path = instance.get_rrd_path() | 153 | path = instance.get_rrd_path() |
394 | 160 | filename = os.path.join(path, 'net-%s.png' % duration) | 154 | filename = os.path.join(path, 'net-%s.png' % duration) |
397 | 161 | 155 | ||
398 | 162 | rrdtool.graph ( | 156 | rrdtool.graph( |
399 | 163 | filename, | 157 | filename, |
400 | 164 | '--disable-rrdtool-tag', | 158 | '--disable-rrdtool-tag', |
401 | 165 | '--imgformat', 'PNG', | 159 | '--imgformat', 'PNG', |
402 | @@ -174,20 +168,19 @@ | |||
403 | 174 | 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'), | 168 | 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'), |
404 | 175 | 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'), | 169 | 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'), |
405 | 176 | 'AREA:rx#00FF00:In traffic', | 170 | 'AREA:rx#00FF00:In traffic', |
409 | 177 | 'LINE1:tx#0000FF:Out traffic', | 171 | 'LINE1:tx#0000FF:Out traffic',) |
410 | 178 | ) | 172 | |
408 | 179 | |||
411 | 180 | store_graph(instance.instance_id, filename) | 173 | store_graph(instance.instance_id, filename) |
412 | 181 | 174 | ||
414 | 182 | 175 | ||
415 | 183 | def graph_disk(instance, duration): | 176 | def graph_disk(instance, duration): |
416 | 184 | """ | 177 | """ |
417 | 185 | Creates a graph of disk usage for the specified duration. | 178 | Creates a graph of disk usage for the specified duration. |
419 | 186 | """ | 179 | """ |
420 | 187 | path = instance.get_rrd_path() | 180 | path = instance.get_rrd_path() |
421 | 188 | filename = os.path.join(path, 'disk-%s.png' % duration) | 181 | filename = os.path.join(path, 'disk-%s.png' % duration) |
424 | 189 | 182 | ||
425 | 190 | rrdtool.graph ( | 183 | rrdtool.graph( |
426 | 191 | filename, | 184 | filename, |
427 | 192 | '--disable-rrdtool-tag', | 185 | '--disable-rrdtool-tag', |
428 | 193 | '--imgformat', 'PNG', | 186 | '--imgformat', 'PNG', |
429 | @@ -202,9 +195,8 @@ | |||
430 | 202 | 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'), | 195 | 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'), |
431 | 203 | 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'), | 196 | 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'), |
432 | 204 | 'AREA:rd#00FF00:Read', | 197 | 'AREA:rd#00FF00:Read', |
436 | 205 | 'LINE1:wr#0000FF:Write', | 198 | 'LINE1:wr#0000FF:Write',) |
437 | 206 | ) | 199 | |
435 | 207 | |||
438 | 208 | store_graph(instance.instance_id, filename) | 200 | store_graph(instance.instance_id, filename) |
439 | 209 | 201 | ||
440 | 210 | 202 | ||
441 | @@ -224,17 +216,16 @@ | |||
442 | 224 | is_secure=False, | 216 | is_secure=False, |
443 | 225 | calling_format=boto.s3.connection.OrdinaryCallingFormat(), | 217 | calling_format=boto.s3.connection.OrdinaryCallingFormat(), |
444 | 226 | port=FLAGS.s3_port, | 218 | port=FLAGS.s3_port, |
447 | 227 | host=FLAGS.s3_host | 219 | host=FLAGS.s3_host) |
446 | 228 | ) | ||
448 | 229 | bucket_name = '_%s.monitor' % instance_id | 220 | bucket_name = '_%s.monitor' % instance_id |
450 | 230 | 221 | ||
451 | 231 | # Object store isn't creating the bucket like it should currently | 222 | # Object store isn't creating the bucket like it should currently |
452 | 232 | # when it is first requested, so have to catch and create manually. | 223 | # when it is first requested, so have to catch and create manually. |
453 | 233 | try: | 224 | try: |
454 | 234 | bucket = s3.get_bucket(bucket_name) | 225 | bucket = s3.get_bucket(bucket_name) |
455 | 235 | except Exception: | 226 | except Exception: |
456 | 236 | bucket = s3.create_bucket(bucket_name) | 227 | bucket = s3.create_bucket(bucket_name) |
458 | 237 | 228 | ||
459 | 238 | key = boto.s3.Key(bucket) | 229 | key = boto.s3.Key(bucket) |
460 | 239 | key.key = os.path.basename(filename) | 230 | key.key = os.path.basename(filename) |
461 | 240 | key.set_contents_from_filename(filename) | 231 | key.set_contents_from_filename(filename) |
462 | @@ -247,18 +238,18 @@ | |||
463 | 247 | self.last_updated = datetime.datetime.min | 238 | self.last_updated = datetime.datetime.min |
464 | 248 | self.cputime = 0 | 239 | self.cputime = 0 |
465 | 249 | self.cputime_last_updated = None | 240 | self.cputime_last_updated = None |
467 | 250 | 241 | ||
468 | 251 | init_rrd(self, 'cpu') | 242 | init_rrd(self, 'cpu') |
469 | 252 | init_rrd(self, 'net') | 243 | init_rrd(self, 'net') |
470 | 253 | init_rrd(self, 'disk') | 244 | init_rrd(self, 'disk') |
472 | 254 | 245 | ||
473 | 255 | def needs_update(self): | 246 | def needs_update(self): |
474 | 256 | """ | 247 | """ |
475 | 257 | Indicates whether this instance is due to have its statistics updated. | 248 | Indicates whether this instance is due to have its statistics updated. |
476 | 258 | """ | 249 | """ |
477 | 259 | delta = utcnow() - self.last_updated | 250 | delta = utcnow() - self.last_updated |
478 | 260 | return delta.seconds >= FLAGS.monitoring_instances_step | 251 | return delta.seconds >= FLAGS.monitoring_instances_step |
480 | 261 | 252 | ||
481 | 262 | def update(self): | 253 | def update(self): |
482 | 263 | """ | 254 | """ |
483 | 264 | Updates the instances statistics and stores the resulting graphs | 255 | Updates the instances statistics and stores the resulting graphs |
484 | @@ -271,7 +262,7 @@ | |||
485 | 271 | if data != None: | 262 | if data != None: |
486 | 272 | logging.debug('CPU: %s', data) | 263 | logging.debug('CPU: %s', data) |
487 | 273 | update_rrd(self, 'cpu', data) | 264 | update_rrd(self, 'cpu', data) |
489 | 274 | 265 | ||
490 | 275 | data = self.fetch_net_stats() | 266 | data = self.fetch_net_stats() |
491 | 276 | logging.debug('NET: %s', data) | 267 | logging.debug('NET: %s', data) |
492 | 277 | update_rrd(self, 'net', data) | 268 | update_rrd(self, 'net', data) |
493 | @@ -279,7 +270,7 @@ | |||
494 | 279 | data = self.fetch_disk_stats() | 270 | data = self.fetch_disk_stats() |
495 | 280 | logging.debug('DISK: %s', data) | 271 | logging.debug('DISK: %s', data) |
496 | 281 | update_rrd(self, 'disk', data) | 272 | update_rrd(self, 'disk', data) |
498 | 282 | 273 | ||
499 | 283 | # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls | 274 | # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls |
500 | 284 | # and make the methods @defer.inlineCallbacks. | 275 | # and make the methods @defer.inlineCallbacks. |
501 | 285 | graph_cpu(self, '1d') | 276 | graph_cpu(self, '1d') |
502 | @@ -297,13 +288,13 @@ | |||
503 | 297 | logging.exception('unexpected error during update') | 288 | logging.exception('unexpected error during update') |
504 | 298 | 289 | ||
505 | 299 | self.last_updated = utcnow() | 290 | self.last_updated = utcnow() |
507 | 300 | 291 | ||
508 | 301 | def get_rrd_path(self): | 292 | def get_rrd_path(self): |
509 | 302 | """ | 293 | """ |
510 | 303 | Returns the path to where RRD files are stored. | 294 | Returns the path to where RRD files are stored. |
511 | 304 | """ | 295 | """ |
512 | 305 | return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id) | 296 | return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id) |
514 | 306 | 297 | ||
515 | 307 | def fetch_cpu_stats(self): | 298 | def fetch_cpu_stats(self): |
516 | 308 | """ | 299 | """ |
517 | 309 | Returns cpu usage statistics for this instance. | 300 | Returns cpu usage statistics for this instance. |
518 | @@ -327,17 +318,17 @@ | |||
519 | 327 | # Calculate the number of seconds between samples. | 318 | # Calculate the number of seconds between samples. |
520 | 328 | d = self.cputime_last_updated - cputime_last_updated | 319 | d = self.cputime_last_updated - cputime_last_updated |
521 | 329 | t = d.days * 86400 + d.seconds | 320 | t = d.days * 86400 + d.seconds |
523 | 330 | 321 | ||
524 | 331 | logging.debug('t = %d', t) | 322 | logging.debug('t = %d', t) |
525 | 332 | 323 | ||
526 | 333 | # Calculate change over time in number of nanoseconds of CPU time used. | 324 | # Calculate change over time in number of nanoseconds of CPU time used. |
527 | 334 | cputime_delta = self.cputime - cputime_last | 325 | cputime_delta = self.cputime - cputime_last |
529 | 335 | 326 | ||
530 | 336 | logging.debug('cputime_delta = %s', cputime_delta) | 327 | logging.debug('cputime_delta = %s', cputime_delta) |
531 | 337 | 328 | ||
532 | 338 | # Get the number of virtual cpus in this domain. | 329 | # Get the number of virtual cpus in this domain. |
533 | 339 | vcpus = int(info['num_cpu']) | 330 | vcpus = int(info['num_cpu']) |
535 | 340 | 331 | ||
536 | 341 | logging.debug('vcpus = %d', vcpus) | 332 | logging.debug('vcpus = %d', vcpus) |
537 | 342 | 333 | ||
538 | 343 | # Calculate CPU % used and cap at 100. | 334 | # Calculate CPU % used and cap at 100. |
539 | @@ -349,9 +340,9 @@ | |||
540 | 349 | """ | 340 | """ |
541 | 350 | rd = 0 | 341 | rd = 0 |
542 | 351 | wr = 0 | 342 | wr = 0 |
544 | 352 | 343 | ||
545 | 353 | disks = self.conn.get_disks(self.instance_id) | 344 | disks = self.conn.get_disks(self.instance_id) |
547 | 354 | 345 | ||
548 | 355 | # Aggregate the read and write totals. | 346 | # Aggregate the read and write totals. |
549 | 356 | for disk in disks: | 347 | for disk in disks: |
550 | 357 | try: | 348 | try: |
551 | @@ -363,7 +354,7 @@ | |||
552 | 363 | logging.error('Cannot get blockstats for "%s" on "%s"', | 354 | logging.error('Cannot get blockstats for "%s" on "%s"', |
553 | 364 | disk, self.instance_id) | 355 | disk, self.instance_id) |
554 | 365 | raise | 356 | raise |
556 | 366 | 357 | ||
557 | 367 | return '%d:%d' % (rd, wr) | 358 | return '%d:%d' % (rd, wr) |
558 | 368 | 359 | ||
559 | 369 | def fetch_net_stats(self): | 360 | def fetch_net_stats(self): |
560 | @@ -372,9 +363,9 @@ | |||
561 | 372 | """ | 363 | """ |
562 | 373 | rx = 0 | 364 | rx = 0 |
563 | 374 | tx = 0 | 365 | tx = 0 |
565 | 375 | 366 | ||
566 | 376 | interfaces = self.conn.get_interfaces(self.instance_id) | 367 | interfaces = self.conn.get_interfaces(self.instance_id) |
568 | 377 | 368 | ||
569 | 378 | # Aggregate the in and out totals. | 369 | # Aggregate the in and out totals. |
570 | 379 | for interface in interfaces: | 370 | for interface in interfaces: |
571 | 380 | try: | 371 | try: |
572 | @@ -385,7 +376,7 @@ | |||
573 | 385 | logging.error('Cannot get ifstats for "%s" on "%s"', | 376 | logging.error('Cannot get ifstats for "%s" on "%s"', |
574 | 386 | interface, self.instance_id) | 377 | interface, self.instance_id) |
575 | 387 | raise | 378 | raise |
577 | 388 | 379 | ||
578 | 389 | return '%d:%d' % (rx, tx) | 380 | return '%d:%d' % (rx, tx) |
579 | 390 | 381 | ||
580 | 391 | 382 | ||
581 | @@ -400,16 +391,16 @@ | |||
582 | 400 | """ | 391 | """ |
583 | 401 | self._instances = {} | 392 | self._instances = {} |
584 | 402 | self._loop = task.LoopingCall(self.updateInstances) | 393 | self._loop = task.LoopingCall(self.updateInstances) |
586 | 403 | 394 | ||
587 | 404 | def startService(self): | 395 | def startService(self): |
588 | 405 | self._instances = {} | 396 | self._instances = {} |
589 | 406 | self._loop.start(interval=FLAGS.monitoring_instances_delay) | 397 | self._loop.start(interval=FLAGS.monitoring_instances_delay) |
590 | 407 | service.Service.startService(self) | 398 | service.Service.startService(self) |
592 | 408 | 399 | ||
593 | 409 | def stopService(self): | 400 | def stopService(self): |
594 | 410 | self._loop.stop() | 401 | self._loop.stop() |
595 | 411 | service.Service.stopService(self) | 402 | service.Service.stopService(self) |
597 | 412 | 403 | ||
598 | 413 | def updateInstances(self): | 404 | def updateInstances(self): |
599 | 414 | """ | 405 | """ |
600 | 415 | Update resource usage for all running instances. | 406 | Update resource usage for all running instances. |
601 | @@ -420,20 +411,20 @@ | |||
602 | 420 | logging.exception('unexpected exception getting connection') | 411 | logging.exception('unexpected exception getting connection') |
603 | 421 | time.sleep(FLAGS.monitoring_instances_delay) | 412 | time.sleep(FLAGS.monitoring_instances_delay) |
604 | 422 | return | 413 | return |
606 | 423 | 414 | ||
607 | 424 | domain_ids = conn.list_instances() | 415 | domain_ids = conn.list_instances() |
608 | 425 | try: | 416 | try: |
610 | 426 | self.updateInstances_(conn, domain_ids) | 417 | self.updateInstances_(conn, domain_ids) |
611 | 427 | except Exception, exn: | 418 | except Exception, exn: |
613 | 428 | logging.exception('updateInstances_') | 419 | logging.exception('updateInstances_') |
614 | 429 | 420 | ||
615 | 430 | def updateInstances_(self, conn, domain_ids): | 421 | def updateInstances_(self, conn, domain_ids): |
616 | 431 | for domain_id in domain_ids: | 422 | for domain_id in domain_ids: |
618 | 432 | if not domain_id in self._instances: | 423 | if not domain_id in self._instances: |
619 | 433 | instance = Instance(conn, domain_id) | 424 | instance = Instance(conn, domain_id) |
620 | 434 | self._instances[domain_id] = instance | 425 | self._instances[domain_id] = instance |
621 | 435 | logging.debug('Found instance: %s', domain_id) | 426 | logging.debug('Found instance: %s', domain_id) |
623 | 436 | 427 | ||
624 | 437 | for key in self._instances.keys(): | 428 | for key in self._instances.keys(): |
625 | 438 | instance = self._instances[key] | 429 | instance = self._instances[key] |
626 | 439 | if instance.needs_update(): | 430 | if instance.needs_update(): |
627 | 440 | 431 | ||
628 | === modified file 'nova/compute/power_state.py' | |||
629 | --- nova/compute/power_state.py 2010-07-18 17:15:12 +0000 | |||
630 | +++ nova/compute/power_state.py 2010-10-22 00:19:41 +0000 | |||
631 | @@ -30,12 +30,11 @@ | |||
632 | 30 | 30 | ||
633 | 31 | def name(code): | 31 | def name(code): |
634 | 32 | d = { | 32 | d = { |
639 | 33 | NOSTATE : 'pending', | 33 | NOSTATE: 'pending', |
640 | 34 | RUNNING : 'running', | 34 | RUNNING: 'running', |
641 | 35 | BLOCKED : 'blocked', | 35 | BLOCKED: 'blocked', |
642 | 36 | PAUSED : 'paused', | 36 | PAUSED: 'paused', |
643 | 37 | SHUTDOWN: 'shutdown', | 37 | SHUTDOWN: 'shutdown', |
647 | 38 | SHUTOFF : 'shutdown', | 38 | SHUTOFF: 'shutdown', |
648 | 39 | CRASHED : 'crashed', | 39 | CRASHED: 'crashed'} |
646 | 40 | } | ||
649 | 41 | return d[code] | 40 | return d[code] |
650 | 42 | 41 | ||
651 | === modified file 'nova/image/service.py' | |||
652 | --- nova/image/service.py 2010-10-15 20:24:02 +0000 | |||
653 | +++ nova/image/service.py 2010-10-22 00:19:41 +0000 | |||
654 | @@ -30,7 +30,8 @@ | |||
655 | 30 | flags.DEFINE_string('glance_teller_port', '9191', | 30 | flags.DEFINE_string('glance_teller_port', '9191', |
656 | 31 | 'Port for Glance\'s Teller service') | 31 | 'Port for Glance\'s Teller service') |
657 | 32 | flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1', | 32 | flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1', |
659 | 33 | 'IP address or URL where Glance\'s Parallax service resides') | 33 | 'IP address or URL where Glance\'s Parallax service ' |
660 | 34 | 'resides') | ||
661 | 34 | flags.DEFINE_string('glance_parallax_port', '9292', | 35 | flags.DEFINE_string('glance_parallax_port', '9292', |
662 | 35 | 'Port for Glance\'s Parallax service') | 36 | 'Port for Glance\'s Parallax service') |
663 | 36 | 37 | ||
664 | @@ -120,10 +121,10 @@ | |||
665 | 120 | 121 | ||
666 | 121 | def delete(self, image_id): | 122 | def delete(self, image_id): |
667 | 122 | """ | 123 | """ |
670 | 123 | Delete the given image. | 124 | Delete the given image. |
671 | 124 | 125 | ||
672 | 125 | :raises NotFound if the image does not exist. | 126 | :raises NotFound if the image does not exist. |
674 | 126 | 127 | ||
675 | 127 | """ | 128 | """ |
676 | 128 | raise NotImplementedError | 129 | raise NotImplementedError |
677 | 129 | 130 | ||
678 | @@ -131,14 +132,14 @@ | |||
679 | 131 | class LocalImageService(BaseImageService): | 132 | class LocalImageService(BaseImageService): |
680 | 132 | 133 | ||
681 | 133 | """Image service storing images to local disk. | 134 | """Image service storing images to local disk. |
683 | 134 | 135 | ||
684 | 135 | It assumes that image_ids are integers.""" | 136 | It assumes that image_ids are integers.""" |
685 | 136 | 137 | ||
686 | 137 | def __init__(self): | 138 | def __init__(self): |
687 | 138 | self._path = "/tmp/nova/images" | 139 | self._path = "/tmp/nova/images" |
688 | 139 | try: | 140 | try: |
689 | 140 | os.makedirs(self._path) | 141 | os.makedirs(self._path) |
691 | 141 | except OSError: # exists | 142 | except OSError: # Exists |
692 | 142 | pass | 143 | pass |
693 | 143 | 144 | ||
694 | 144 | def _path_to(self, image_id): | 145 | def _path_to(self, image_id): |
695 | @@ -156,7 +157,7 @@ | |||
696 | 156 | 157 | ||
697 | 157 | def show(self, id): | 158 | def show(self, id): |
698 | 158 | try: | 159 | try: |
700 | 159 | return pickle.load(open(self._path_to(id))) | 160 | return pickle.load(open(self._path_to(id))) |
701 | 160 | except IOError: | 161 | except IOError: |
702 | 161 | raise exception.NotFound | 162 | raise exception.NotFound |
703 | 162 | 163 | ||
704 | @@ -164,7 +165,7 @@ | |||
705 | 164 | """ | 165 | """ |
706 | 165 | Store the image data and return the new image id. | 166 | Store the image data and return the new image id. |
707 | 166 | """ | 167 | """ |
709 | 167 | id = random.randint(0, 2**32-1) | 168 | id = random.randint(0, 2 ** 32 - 1) |
710 | 168 | data['id'] = id | 169 | data['id'] = id |
711 | 169 | self.update(id, data) | 170 | self.update(id, data) |
712 | 170 | return id | 171 | return id |
713 | 171 | 172 | ||
714 | === modified file 'nova/image/services/glance/__init__.py' | |||
715 | --- nova/image/services/glance/__init__.py 2010-10-15 20:24:02 +0000 | |||
716 | +++ nova/image/services/glance/__init__.py 2010-10-22 00:19:41 +0000 | |||
717 | @@ -30,6 +30,7 @@ | |||
718 | 30 | 30 | ||
719 | 31 | FLAGS = flags.FLAGS | 31 | FLAGS = flags.FLAGS |
720 | 32 | 32 | ||
721 | 33 | |||
722 | 33 | class TellerClient(object): | 34 | class TellerClient(object): |
723 | 34 | 35 | ||
724 | 35 | def __init__(self): | 36 | def __init__(self): |
725 | @@ -153,7 +154,6 @@ | |||
726 | 153 | 154 | ||
727 | 154 | 155 | ||
728 | 155 | class GlanceImageService(nova.image.service.BaseImageService): | 156 | class GlanceImageService(nova.image.service.BaseImageService): |
729 | 156 | |||
730 | 157 | """Provides storage and retrieval of disk image objects within Glance.""" | 157 | """Provides storage and retrieval of disk image objects within Glance.""" |
731 | 158 | 158 | ||
732 | 159 | def __init__(self): | 159 | def __init__(self): |
733 | @@ -202,10 +202,10 @@ | |||
734 | 202 | 202 | ||
735 | 203 | def delete(self, image_id): | 203 | def delete(self, image_id): |
736 | 204 | """ | 204 | """ |
739 | 205 | Delete the given image. | 205 | Delete the given image. |
740 | 206 | 206 | ||
741 | 207 | :raises NotFound if the image does not exist. | 207 | :raises NotFound if the image does not exist. |
743 | 208 | 208 | ||
744 | 209 | """ | 209 | """ |
745 | 210 | self.parallax.delete_image_metadata(image_id) | 210 | self.parallax.delete_image_metadata(image_id) |
746 | 211 | 211 | ||
747 | 212 | 212 | ||
748 | === modified file 'nova/network/linux_net.py' | |||
749 | --- nova/network/linux_net.py 2010-10-20 20:54:53 +0000 | |||
750 | +++ nova/network/linux_net.py 2010-10-22 00:19:41 +0000 | |||
751 | @@ -53,6 +53,7 @@ | |||
752 | 53 | 53 | ||
753 | 54 | DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] | 54 | DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] |
754 | 55 | 55 | ||
755 | 56 | |||
756 | 56 | def init_host(): | 57 | def init_host(): |
757 | 57 | """Basic networking setup goes here""" | 58 | """Basic networking setup goes here""" |
758 | 58 | # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port | 59 | # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port |
759 | @@ -72,6 +73,7 @@ | |||
760 | 72 | _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % | 73 | _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % |
761 | 73 | {'range': FLAGS.fixed_range}) | 74 | {'range': FLAGS.fixed_range}) |
762 | 74 | 75 | ||
763 | 76 | |||
764 | 75 | def bind_floating_ip(floating_ip): | 77 | def bind_floating_ip(floating_ip): |
765 | 76 | """Bind ip to public interface""" | 78 | """Bind ip to public interface""" |
766 | 77 | _execute("sudo ip addr add %s dev %s" % (floating_ip, | 79 | _execute("sudo ip addr add %s dev %s" % (floating_ip, |
767 | @@ -103,7 +105,7 @@ | |||
768 | 103 | _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT" | 105 | _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT" |
769 | 104 | % (fixed_ip)) | 106 | % (fixed_ip)) |
770 | 105 | for (protocol, port) in DEFAULT_PORTS: | 107 | for (protocol, port) in DEFAULT_PORTS: |
772 | 106 | _confirm_rule("FORWARD","-d %s -p %s --dport %s -j ACCEPT" | 108 | _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" |
773 | 107 | % (fixed_ip, protocol, port)) | 109 | % (fixed_ip, protocol, port)) |
774 | 108 | 110 | ||
775 | 109 | 111 | ||
776 | @@ -189,7 +191,8 @@ | |||
777 | 189 | 191 | ||
778 | 190 | # if dnsmasq is already running, then tell it to reload | 192 | # if dnsmasq is already running, then tell it to reload |
779 | 191 | if pid: | 193 | if pid: |
781 | 192 | out, _err = _execute('cat /proc/%d/cmdline' % pid, check_exit_code=False) | 194 | out, _err = _execute('cat /proc/%d/cmdline' % pid, |
782 | 195 | check_exit_code=False) | ||
783 | 193 | if conffile in out: | 196 | if conffile in out: |
784 | 194 | try: | 197 | try: |
785 | 195 | _execute('sudo kill -HUP %d' % pid) | 198 | _execute('sudo kill -HUP %d' % pid) |
786 | @@ -233,7 +236,8 @@ | |||
787 | 233 | """Delete and re-add iptables rule""" | 236 | """Delete and re-add iptables rule""" |
788 | 234 | if FLAGS.use_nova_chains: | 237 | if FLAGS.use_nova_chains: |
789 | 235 | chain = "nova_%s" % chain.lower() | 238 | chain = "nova_%s" % chain.lower() |
791 | 236 | _execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False) | 239 | _execute("sudo iptables --delete %s %s" % (chain, cmd), |
792 | 240 | check_exit_code=False) | ||
793 | 237 | _execute("sudo iptables -I %s %s" % (chain, cmd)) | 241 | _execute("sudo iptables -I %s %s" % (chain, cmd)) |
794 | 238 | 242 | ||
795 | 239 | 243 | ||
796 | 240 | 244 | ||
797 | === modified file 'nova/network/manager.py' | |||
798 | --- nova/network/manager.py 2010-10-14 23:44:58 +0000 | |||
799 | +++ nova/network/manager.py 2010-10-22 00:19:41 +0000 | |||
800 | @@ -49,7 +49,8 @@ | |||
801 | 49 | flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') | 49 | flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') |
802 | 50 | flags.DEFINE_integer('network_size', 256, | 50 | flags.DEFINE_integer('network_size', 256, |
803 | 51 | 'Number of addresses in each private subnet') | 51 | 'Number of addresses in each private subnet') |
805 | 52 | flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') | 52 | flags.DEFINE_string('floating_range', '4.4.4.0/24', |
806 | 53 | 'Floating IP address block') | ||
807 | 53 | flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') | 54 | flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') |
808 | 54 | flags.DEFINE_integer('cnt_vpn_clients', 5, | 55 | flags.DEFINE_integer('cnt_vpn_clients', 5, |
809 | 55 | 'Number of addresses reserved for vpn clients') | 56 | 'Number of addresses reserved for vpn clients') |
810 | @@ -287,7 +288,6 @@ | |||
811 | 287 | self.db.network_update(context, network_id, net) | 288 | self.db.network_update(context, network_id, net) |
812 | 288 | 289 | ||
813 | 289 | 290 | ||
814 | 290 | |||
815 | 291 | class FlatDHCPManager(NetworkManager): | 291 | class FlatDHCPManager(NetworkManager): |
816 | 292 | """Flat networking with dhcp""" | 292 | """Flat networking with dhcp""" |
817 | 293 | 293 | ||
818 | @@ -432,4 +432,3 @@ | |||
819 | 432 | """Number of reserved ips at the top of the range""" | 432 | """Number of reserved ips at the top of the range""" |
820 | 433 | parent_reserved = super(VlanManager, self)._top_reserved_ips | 433 | parent_reserved = super(VlanManager, self)._top_reserved_ips |
821 | 434 | return parent_reserved + FLAGS.cnt_vpn_clients | 434 | return parent_reserved + FLAGS.cnt_vpn_clients |
822 | 435 | |||
823 | 436 | 435 | ||
824 | === modified file 'nova/objectstore/bucket.py' | |||
825 | --- nova/objectstore/bucket.py 2010-10-14 05:07:43 +0000 | |||
826 | +++ nova/objectstore/bucket.py 2010-10-22 00:19:41 +0000 | |||
827 | @@ -69,7 +69,8 @@ | |||
828 | 69 | """Create a new bucket owned by a project. | 69 | """Create a new bucket owned by a project. |
829 | 70 | 70 | ||
830 | 71 | @bucket_name: a string representing the name of the bucket to create | 71 | @bucket_name: a string representing the name of the bucket to create |
832 | 72 | @context: a nova.auth.api.ApiContext object representing who owns the bucket. | 72 | @context: a nova.auth.api.ApiContext object representing who owns the |
833 | 73 | bucket. | ||
834 | 73 | 74 | ||
835 | 74 | Raises: | 75 | Raises: |
836 | 75 | NotAuthorized: if the bucket is already exists or has invalid name | 76 | NotAuthorized: if the bucket is already exists or has invalid name |
837 | @@ -77,12 +78,12 @@ | |||
838 | 77 | path = os.path.abspath(os.path.join( | 78 | path = os.path.abspath(os.path.join( |
839 | 78 | FLAGS.buckets_path, bucket_name)) | 79 | FLAGS.buckets_path, bucket_name)) |
840 | 79 | if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ | 80 | if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ |
843 | 80 | os.path.exists(path): | 81 | os.path.exists(path): |
844 | 81 | raise exception.NotAuthorized() | 82 | raise exception.NotAuthorized() |
845 | 82 | 83 | ||
846 | 83 | os.makedirs(path) | 84 | os.makedirs(path) |
847 | 84 | 85 | ||
849 | 85 | with open(path+'.json', 'w') as f: | 86 | with open(path + '.json', 'w') as f: |
850 | 86 | json.dump({'ownerId': context.project_id}, f) | 87 | json.dump({'ownerId': context.project_id}, f) |
851 | 87 | 88 | ||
852 | 88 | @property | 89 | @property |
853 | @@ -99,22 +100,25 @@ | |||
854 | 99 | @property | 100 | @property |
855 | 100 | def owner_id(self): | 101 | def owner_id(self): |
856 | 101 | try: | 102 | try: |
858 | 102 | with open(self.path+'.json') as f: | 103 | with open(self.path + '.json') as f: |
859 | 103 | return json.load(f)['ownerId'] | 104 | return json.load(f)['ownerId'] |
860 | 104 | except: | 105 | except: |
861 | 105 | return None | 106 | return None |
862 | 106 | 107 | ||
863 | 107 | def is_authorized(self, context): | 108 | def is_authorized(self, context): |
864 | 108 | try: | 109 | try: |
866 | 109 | return context.user.is_admin() or self.owner_id == context.project_id | 110 | return context.user.is_admin() or \ |
867 | 111 | self.owner_id == context.project_id | ||
868 | 110 | except Exception, e: | 112 | except Exception, e: |
869 | 111 | return False | 113 | return False |
870 | 112 | 114 | ||
871 | 113 | def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): | 115 | def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): |
872 | 114 | object_names = [] | 116 | object_names = [] |
873 | 117 | path_length = len(self.path) | ||
874 | 115 | for root, dirs, files in os.walk(self.path): | 118 | for root, dirs, files in os.walk(self.path): |
875 | 116 | for file_name in files: | 119 | for file_name in files: |
877 | 117 | object_names.append(os.path.join(root, file_name)[len(self.path)+1:]) | 120 | object_name = os.path.join(root, file_name)[path_length + 1:] |
878 | 121 | object_names.append(object_name) | ||
879 | 118 | object_names.sort() | 122 | object_names.sort() |
880 | 119 | contents = [] | 123 | contents = [] |
881 | 120 | 124 | ||
882 | @@ -164,7 +168,7 @@ | |||
883 | 164 | if len(os.listdir(self.path)) > 0: | 168 | if len(os.listdir(self.path)) > 0: |
884 | 165 | raise exception.NotEmpty() | 169 | raise exception.NotEmpty() |
885 | 166 | os.rmdir(self.path) | 170 | os.rmdir(self.path) |
887 | 167 | os.remove(self.path+'.json') | 171 | os.remove(self.path + '.json') |
888 | 168 | 172 | ||
889 | 169 | def __getitem__(self, key): | 173 | def __getitem__(self, key): |
890 | 170 | return stored.Object(self, key) | 174 | return stored.Object(self, key) |
891 | 171 | 175 | ||
892 | === modified file 'nova/objectstore/handler.py' | |||
893 | --- nova/objectstore/handler.py 2010-10-19 23:57:24 +0000 | |||
894 | +++ nova/objectstore/handler.py 2010-10-22 00:19:41 +0000 | |||
895 | @@ -136,6 +136,7 @@ | |||
896 | 136 | logging.debug("Authentication Failure: %s", ex) | 136 | logging.debug("Authentication Failure: %s", ex) |
897 | 137 | raise exception.NotAuthorized() | 137 | raise exception.NotAuthorized() |
898 | 138 | 138 | ||
899 | 139 | |||
900 | 139 | class ErrorHandlingResource(resource.Resource): | 140 | class ErrorHandlingResource(resource.Resource): |
901 | 140 | """Maps exceptions to 404 / 401 codes. Won't work for | 141 | """Maps exceptions to 404 / 401 codes. Won't work for |
902 | 141 | exceptions thrown after NOT_DONE_YET is returned. | 142 | exceptions thrown after NOT_DONE_YET is returned. |
903 | @@ -162,7 +163,7 @@ | |||
904 | 162 | def __init__(self): | 163 | def __init__(self): |
905 | 163 | ErrorHandlingResource.__init__(self) | 164 | ErrorHandlingResource.__init__(self) |
906 | 164 | 165 | ||
908 | 165 | def getChild(self, name, request): # pylint: disable-msg=C0103 | 166 | def getChild(self, name, request): # pylint: disable-msg=C0103 |
909 | 166 | """Returns either the image or bucket resource""" | 167 | """Returns either the image or bucket resource""" |
910 | 167 | request.context = get_context(request) | 168 | request.context = get_context(request) |
911 | 168 | if name == '': | 169 | if name == '': |
912 | @@ -172,7 +173,7 @@ | |||
913 | 172 | else: | 173 | else: |
914 | 173 | return BucketResource(name) | 174 | return BucketResource(name) |
915 | 174 | 175 | ||
917 | 175 | def render_GET(self, request): # pylint: disable-msg=R0201 | 176 | def render_GET(self, request): # pylint: disable-msg=R0201 |
918 | 176 | """Renders the GET request for a list of buckets as XML""" | 177 | """Renders the GET request for a list of buckets as XML""" |
919 | 177 | logging.debug('List of buckets requested') | 178 | logging.debug('List of buckets requested') |
920 | 178 | buckets = [b for b in bucket.Bucket.all() \ | 179 | buckets = [b for b in bucket.Bucket.all() \ |
921 | @@ -321,11 +322,13 @@ | |||
922 | 321 | if not self.img.is_authorized(request.context, True): | 322 | if not self.img.is_authorized(request.context, True): |
923 | 322 | raise exception.NotAuthorized() | 323 | raise exception.NotAuthorized() |
924 | 323 | return static.File(self.img.image_path, | 324 | return static.File(self.img.image_path, |
927 | 324 | defaultType='application/octet-stream' | 325 | defaultType='application/octet-stream').\ |
928 | 325 | ).render_GET(request) | 326 | render_GET(request) |
929 | 327 | |||
930 | 326 | 328 | ||
931 | 327 | class ImagesResource(resource.Resource): | 329 | class ImagesResource(resource.Resource): |
932 | 328 | """A web resource representing a list of images""" | 330 | """A web resource representing a list of images""" |
933 | 331 | |||
934 | 329 | def getChild(self, name, _request): | 332 | def getChild(self, name, _request): |
935 | 330 | """Returns itself or an ImageResource if no name given""" | 333 | """Returns itself or an ImageResource if no name given""" |
936 | 331 | if name == '': | 334 | if name == '': |
937 | @@ -333,7 +336,7 @@ | |||
938 | 333 | else: | 336 | else: |
939 | 334 | return ImageResource(name) | 337 | return ImageResource(name) |
940 | 335 | 338 | ||
942 | 336 | def render_GET(self, request): # pylint: disable-msg=R0201 | 339 | def render_GET(self, request): # pylint: disable-msg=R0201 |
943 | 337 | """ returns a json listing of all images | 340 | """ returns a json listing of all images |
944 | 338 | that a user has permissions to see """ | 341 | that a user has permissions to see """ |
945 | 339 | 342 | ||
946 | @@ -362,7 +365,7 @@ | |||
947 | 362 | request.finish() | 365 | request.finish() |
948 | 363 | return server.NOT_DONE_YET | 366 | return server.NOT_DONE_YET |
949 | 364 | 367 | ||
951 | 365 | def render_PUT(self, request): # pylint: disable-msg=R0201 | 368 | def render_PUT(self, request): # pylint: disable-msg=R0201 |
952 | 366 | """ create a new registered image """ | 369 | """ create a new registered image """ |
953 | 367 | 370 | ||
954 | 368 | image_id = get_argument(request, 'image_id', u'') | 371 | image_id = get_argument(request, 'image_id', u'') |
955 | @@ -383,7 +386,7 @@ | |||
956 | 383 | p.start() | 386 | p.start() |
957 | 384 | return '' | 387 | return '' |
958 | 385 | 388 | ||
960 | 386 | def render_POST(self, request): # pylint: disable-msg=R0201 | 389 | def render_POST(self, request): # pylint: disable-msg=R0201 |
961 | 387 | """Update image attributes: public/private""" | 390 | """Update image attributes: public/private""" |
962 | 388 | 391 | ||
963 | 389 | # image_id required for all requests | 392 | # image_id required for all requests |
964 | @@ -397,7 +400,7 @@ | |||
965 | 397 | if operation: | 400 | if operation: |
966 | 398 | # operation implies publicity toggle | 401 | # operation implies publicity toggle |
967 | 399 | logging.debug("handling publicity toggle") | 402 | logging.debug("handling publicity toggle") |
969 | 400 | image_object.set_public(operation=='add') | 403 | image_object.set_public(operation == 'add') |
970 | 401 | else: | 404 | else: |
971 | 402 | # other attributes imply update | 405 | # other attributes imply update |
972 | 403 | logging.debug("update user fields") | 406 | logging.debug("update user fields") |
973 | @@ -407,7 +410,7 @@ | |||
974 | 407 | image_object.update_user_editable_fields(clean_args) | 410 | image_object.update_user_editable_fields(clean_args) |
975 | 408 | return '' | 411 | return '' |
976 | 409 | 412 | ||
978 | 410 | def render_DELETE(self, request): # pylint: disable-msg=R0201 | 413 | def render_DELETE(self, request): # pylint: disable-msg=R0201 |
979 | 411 | """Delete a registered image""" | 414 | """Delete a registered image""" |
980 | 412 | image_id = get_argument(request, "image_id", u"") | 415 | image_id = get_argument(request, "image_id", u"") |
981 | 413 | image_object = image.Image(image_id) | 416 | image_object = image.Image(image_id) |
982 | 414 | 417 | ||
983 | === modified file 'nova/objectstore/image.py' | |||
984 | --- nova/objectstore/image.py 2010-10-14 05:07:43 +0000 | |||
985 | +++ nova/objectstore/image.py 2010-10-22 00:19:41 +0000 | |||
986 | @@ -48,8 +48,8 @@ | |||
987 | 48 | self.image_id = image_id | 48 | self.image_id = image_id |
988 | 49 | self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id)) | 49 | self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id)) |
989 | 50 | if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \ | 50 | if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \ |
992 | 51 | not os.path.isdir(self.path): | 51 | not os.path.isdir(self.path): |
993 | 52 | raise exception.NotFound | 52 | raise exception.NotFound |
994 | 53 | 53 | ||
995 | 54 | @property | 54 | @property |
996 | 55 | def image_path(self): | 55 | def image_path(self): |
997 | @@ -127,8 +127,8 @@ | |||
998 | 127 | a string of the image id for the kernel | 127 | a string of the image id for the kernel |
999 | 128 | 128 | ||
1000 | 129 | @type ramdisk: bool or str | 129 | @type ramdisk: bool or str |
1003 | 130 | @param ramdisk: either TRUE meaning this partition is a ramdisk image or | 130 | @param ramdisk: either TRUE meaning this partition is a ramdisk image |
1004 | 131 | a string of the image id for the ramdisk | 131 | or a string of the image id for the ramdisk |
1005 | 132 | 132 | ||
1006 | 133 | 133 | ||
1007 | 134 | @type public: bool | 134 | @type public: bool |
1008 | @@ -160,8 +160,7 @@ | |||
1009 | 160 | 'isPublic': public, | 160 | 'isPublic': public, |
1010 | 161 | 'architecture': 'x86_64', | 161 | 'architecture': 'x86_64', |
1011 | 162 | 'imageType': image_type, | 162 | 'imageType': image_type, |
1014 | 163 | 'state': 'available' | 163 | 'state': 'available'} |
1013 | 164 | } | ||
1015 | 165 | 164 | ||
1016 | 166 | if type(kernel) is str and len(kernel) > 0: | 165 | if type(kernel) is str and len(kernel) > 0: |
1017 | 167 | info['kernelId'] = kernel | 166 | info['kernelId'] = kernel |
1018 | @@ -180,7 +179,7 @@ | |||
1019 | 180 | os.makedirs(image_path) | 179 | os.makedirs(image_path) |
1020 | 181 | 180 | ||
1021 | 182 | bucket_name = image_location.split("/")[0] | 181 | bucket_name = image_location.split("/")[0] |
1023 | 183 | manifest_path = image_location[len(bucket_name)+1:] | 182 | manifest_path = image_location[len(bucket_name) + 1:] |
1024 | 184 | bucket_object = bucket.Bucket(bucket_name) | 183 | bucket_object = bucket.Bucket(bucket_name) |
1025 | 185 | 184 | ||
1026 | 186 | manifest = ElementTree.fromstring(bucket_object[manifest_path].read()) | 185 | manifest = ElementTree.fromstring(bucket_object[manifest_path].read()) |
1027 | @@ -204,10 +203,9 @@ | |||
1028 | 204 | 'imageId': image_id, | 203 | 'imageId': image_id, |
1029 | 205 | 'imageLocation': image_location, | 204 | 'imageLocation': image_location, |
1030 | 206 | 'imageOwnerId': context.project_id, | 205 | 'imageOwnerId': context.project_id, |
1035 | 207 | 'isPublic': False, # FIXME: grab public from manifest | 206 | 'isPublic': False, # FIXME: grab public from manifest |
1036 | 208 | 'architecture': 'x86_64', # FIXME: grab architecture from manifest | 207 | 'architecture': 'x86_64', # FIXME: grab architecture from manifest |
1037 | 209 | 'imageType' : image_type | 208 | 'imageType': image_type} |
1034 | 210 | } | ||
1038 | 211 | 209 | ||
1039 | 212 | if kernel_id: | 210 | if kernel_id: |
1040 | 213 | info['kernelId'] = kernel_id | 211 | info['kernelId'] = kernel_id |
1041 | @@ -230,24 +228,29 @@ | |||
1042 | 230 | write_state('decrypting') | 228 | write_state('decrypting') |
1043 | 231 | 229 | ||
1044 | 232 | # FIXME: grab kernelId and ramdiskId from bundle manifest | 230 | # FIXME: grab kernelId and ramdiskId from bundle manifest |
1047 | 233 | encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text) | 231 | hex_key = manifest.find("image/ec2_encrypted_key").text |
1048 | 234 | encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text) | 232 | encrypted_key = binascii.a2b_hex(hex_key) |
1049 | 233 | hex_iv = manifest.find("image/ec2_encrypted_iv").text | ||
1050 | 234 | encrypted_iv = binascii.a2b_hex(hex_iv) | ||
1051 | 235 | cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem") | 235 | cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem") |
1052 | 236 | 236 | ||
1053 | 237 | decrypted_filename = os.path.join(image_path, 'image.tar.gz') | 237 | decrypted_filename = os.path.join(image_path, 'image.tar.gz') |
1055 | 238 | Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename) | 238 | Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, |
1056 | 239 | cloud_private_key, decrypted_filename) | ||
1057 | 239 | 240 | ||
1058 | 240 | write_state('untarring') | 241 | write_state('untarring') |
1059 | 241 | 242 | ||
1060 | 242 | image_file = Image.untarzip_image(image_path, decrypted_filename) | 243 | image_file = Image.untarzip_image(image_path, decrypted_filename) |
1062 | 243 | shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image')) | 244 | shutil.move(os.path.join(image_path, image_file), |
1063 | 245 | os.path.join(image_path, 'image')) | ||
1064 | 244 | 246 | ||
1065 | 245 | write_state('available') | 247 | write_state('available') |
1066 | 246 | os.unlink(decrypted_filename) | 248 | os.unlink(decrypted_filename) |
1067 | 247 | os.unlink(encrypted_filename) | 249 | os.unlink(encrypted_filename) |
1068 | 248 | 250 | ||
1069 | 249 | @staticmethod | 251 | @staticmethod |
1071 | 250 | def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): | 252 | def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, |
1072 | 253 | cloud_private_key, decrypted_filename): | ||
1073 | 251 | key, err = utils.execute( | 254 | key, err = utils.execute( |
1074 | 252 | 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, | 255 | 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, |
1075 | 253 | process_input=encrypted_key, | 256 | process_input=encrypted_key, |
1076 | @@ -259,13 +262,15 @@ | |||
1077 | 259 | process_input=encrypted_iv, | 262 | process_input=encrypted_iv, |
1078 | 260 | check_exit_code=False) | 263 | check_exit_code=False) |
1079 | 261 | if err: | 264 | if err: |
1081 | 262 | raise exception.Error("Failed to decrypt initialization vector: %s" % err) | 265 | raise exception.Error("Failed to decrypt initialization " |
1082 | 266 | "vector: %s" % err) | ||
1083 | 263 | _out, err = utils.execute( | 267 | _out, err = utils.execute( |
1084 | 264 | 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' | 268 | 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' |
1085 | 265 | % (encrypted_filename, key, iv, decrypted_filename), | 269 | % (encrypted_filename, key, iv, decrypted_filename), |
1086 | 266 | check_exit_code=False) | 270 | check_exit_code=False) |
1087 | 267 | if err: | 271 | if err: |
1089 | 268 | raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) | 272 | raise exception.Error("Failed to decrypt image file %s : %s" % |
1090 | 273 | (encrypted_filename, err)) | ||
1091 | 269 | 274 | ||
1092 | 270 | @staticmethod | 275 | @staticmethod |
1093 | 271 | def untarzip_image(path, filename): | 276 | def untarzip_image(path, filename): |
1094 | 272 | 277 | ||
1095 | === modified file 'nova/objectstore/stored.py' | |||
1096 | --- nova/objectstore/stored.py 2010-08-16 12:16:21 +0000 | |||
1097 | +++ nova/objectstore/stored.py 2010-10-22 00:19:41 +0000 | |||
1098 | @@ -50,8 +50,8 @@ | |||
1099 | 50 | return os.path.getmtime(self.path) | 50 | return os.path.getmtime(self.path) |
1100 | 51 | 51 | ||
1101 | 52 | def read(self): | 52 | def read(self): |
1104 | 53 | """ read all contents of key into memory and return """ | 53 | """ read all contents of key into memory and return """ |
1105 | 54 | return self.file.read() | 54 | return self.file.read() |
1106 | 55 | 55 | ||
1107 | 56 | @property | 56 | @property |
1108 | 57 | def file(self): | 57 | def file(self): |
1109 | 58 | 58 | ||
1110 | === modified file 'nova/scheduler/driver.py' | |||
1111 | --- nova/scheduler/driver.py 2010-09-23 09:24:54 +0000 | |||
1112 | +++ nova/scheduler/driver.py 2010-10-22 00:19:41 +0000 | |||
1113 | @@ -31,10 +31,12 @@ | |||
1114 | 31 | flags.DEFINE_integer('service_down_time', 60, | 31 | flags.DEFINE_integer('service_down_time', 60, |
1115 | 32 | 'maximum time since last checkin for up service') | 32 | 'maximum time since last checkin for up service') |
1116 | 33 | 33 | ||
1117 | 34 | |||
1118 | 34 | class NoValidHost(exception.Error): | 35 | class NoValidHost(exception.Error): |
1119 | 35 | """There is no valid host for the command.""" | 36 | """There is no valid host for the command.""" |
1120 | 36 | pass | 37 | pass |
1121 | 37 | 38 | ||
1122 | 39 | |||
1123 | 38 | class Scheduler(object): | 40 | class Scheduler(object): |
1124 | 39 | """The base class that all Scheduler clases should inherit from.""" | 41 | """The base class that all Scheduler clases should inherit from.""" |
1125 | 40 | 42 | ||
1126 | 41 | 43 | ||
1127 | === modified file 'nova/scheduler/manager.py' | |||
1128 | --- nova/scheduler/manager.py 2010-10-14 05:05:21 +0000 | |||
1129 | +++ nova/scheduler/manager.py 2010-10-22 00:19:41 +0000 | |||
1130 | @@ -56,7 +56,8 @@ | |||
1131 | 56 | driver_method = 'schedule_%s' % method | 56 | driver_method = 'schedule_%s' % method |
1132 | 57 | elevated = context.elevated() | 57 | elevated = context.elevated() |
1133 | 58 | try: | 58 | try: |
1135 | 59 | host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) | 59 | host = getattr(self.driver, driver_method)(elevated, *args, |
1136 | 60 | **kwargs) | ||
1137 | 60 | except AttributeError: | 61 | except AttributeError: |
1138 | 61 | host = self.driver.schedule(elevated, topic, *args, **kwargs) | 62 | host = self.driver.schedule(elevated, topic, *args, **kwargs) |
1139 | 62 | 63 | ||
1140 | 63 | 64 | ||
1141 | === modified file 'nova/scheduler/simple.py' | |||
1142 | --- nova/scheduler/simple.py 2010-09-12 03:00:56 +0000 | |||
1143 | +++ nova/scheduler/simple.py 2010-10-22 00:19:41 +0000 | |||
1144 | @@ -36,6 +36,7 @@ | |||
1145 | 36 | flags.DEFINE_integer("max_networks", 1000, | 36 | flags.DEFINE_integer("max_networks", 1000, |
1146 | 37 | "maximum number of networks to allow per host") | 37 | "maximum number of networks to allow per host") |
1147 | 38 | 38 | ||
1148 | 39 | |||
1149 | 39 | class SimpleScheduler(chance.ChanceScheduler): | 40 | class SimpleScheduler(chance.ChanceScheduler): |
1150 | 40 | """Implements Naive Scheduler that tries to find least loaded host.""" | 41 | """Implements Naive Scheduler that tries to find least loaded host.""" |
1151 | 41 | 42 | ||
1152 | 42 | 43 | ||
1153 | === modified file 'nova/virt/fake.py' | |||
1154 | --- nova/virt/fake.py 2010-09-20 09:46:18 +0000 | |||
1155 | +++ nova/virt/fake.py 2010-10-22 00:19:41 +0000 | |||
1156 | @@ -226,6 +226,7 @@ | |||
1157 | 226 | def get_console_output(self, instance): | 226 | def get_console_output(self, instance): |
1158 | 227 | return 'FAKE CONSOLE OUTPUT' | 227 | return 'FAKE CONSOLE OUTPUT' |
1159 | 228 | 228 | ||
1160 | 229 | |||
1161 | 229 | class FakeInstance(object): | 230 | class FakeInstance(object): |
1162 | 230 | def __init__(self): | 231 | def __init__(self): |
1163 | 231 | self._state = power_state.NOSTATE | 232 | self._state = power_state.NOSTATE |
1164 | 232 | 233 | ||
1165 | === modified file 'nova/virt/images.py' | |||
1166 | --- nova/virt/images.py 2010-10-07 14:03:43 +0000 | |||
1167 | +++ nova/virt/images.py 2010-10-22 00:19:41 +0000 | |||
1168 | @@ -62,8 +62,8 @@ | |||
1169 | 62 | headers['Authorization'] = 'AWS %s:%s' % (access, signature) | 62 | headers['Authorization'] = 'AWS %s:%s' % (access, signature) |
1170 | 63 | 63 | ||
1171 | 64 | cmd = ['/usr/bin/curl', '--fail', '--silent', url] | 64 | cmd = ['/usr/bin/curl', '--fail', '--silent', url] |
1174 | 65 | for (k,v) in headers.iteritems(): | 65 | for (k, v) in headers.iteritems(): |
1175 | 66 | cmd += ['-H', '%s: %s' % (k,v)] | 66 | cmd += ['-H', '%s: %s' % (k, v)] |
1176 | 67 | 67 | ||
1177 | 68 | cmd += ['-o', path] | 68 | cmd += ['-o', path] |
1178 | 69 | return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) | 69 | return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) |
1179 | 70 | 70 | ||
1180 | === modified file 'nova/virt/libvirt_conn.py' | |||
1181 | --- nova/virt/libvirt_conn.py 2010-10-18 20:32:45 +0000 | |||
1182 | +++ nova/virt/libvirt_conn.py 2010-10-22 00:19:41 +0000 | |||
1183 | @@ -62,7 +62,8 @@ | |||
1184 | 62 | 'Template file for injected network') | 62 | 'Template file for injected network') |
1185 | 63 | flags.DEFINE_string('libvirt_type', | 63 | flags.DEFINE_string('libvirt_type', |
1186 | 64 | 'kvm', | 64 | 'kvm', |
1188 | 65 | 'Libvirt domain type (valid options are: kvm, qemu, uml, xen)') | 65 | 'Libvirt domain type (valid options are: ' |
1189 | 66 | 'kvm, qemu, uml, xen)') | ||
1190 | 66 | flags.DEFINE_string('libvirt_uri', | 67 | flags.DEFINE_string('libvirt_uri', |
1191 | 67 | '', | 68 | '', |
1192 | 68 | 'Override the default libvirt URI (which is dependent' | 69 | 'Override the default libvirt URI (which is dependent' |
1193 | @@ -96,7 +97,8 @@ | |||
1194 | 96 | def _conn(self): | 97 | def _conn(self): |
1195 | 97 | if not self._wrapped_conn or not self._test_connection(): | 98 | if not self._wrapped_conn or not self._test_connection(): |
1196 | 98 | logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) | 99 | logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) |
1198 | 99 | self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) | 100 | self._wrapped_conn = self._connect(self.libvirt_uri, |
1199 | 101 | self.read_only) | ||
1200 | 100 | return self._wrapped_conn | 102 | return self._wrapped_conn |
1201 | 101 | 103 | ||
1202 | 102 | def _test_connection(self): | 104 | def _test_connection(self): |
1203 | @@ -150,6 +152,7 @@ | |||
1204 | 150 | # WE'LL save this for when we do shutdown, | 152 | # WE'LL save this for when we do shutdown, |
1205 | 151 | # instead of destroy - but destroy returns immediately | 153 | # instead of destroy - but destroy returns immediately |
1206 | 152 | timer = task.LoopingCall(f=None) | 154 | timer = task.LoopingCall(f=None) |
1207 | 155 | |||
1208 | 153 | def _wait_for_shutdown(): | 156 | def _wait_for_shutdown(): |
1209 | 154 | try: | 157 | try: |
1210 | 155 | state = self.get_info(instance['name'])['state'] | 158 | state = self.get_info(instance['name'])['state'] |
1211 | @@ -164,6 +167,7 @@ | |||
1212 | 164 | power_state.SHUTDOWN) | 167 | power_state.SHUTDOWN) |
1213 | 165 | timer.stop() | 168 | timer.stop() |
1214 | 166 | d.callback(None) | 169 | d.callback(None) |
1215 | 170 | |||
1216 | 167 | timer.f = _wait_for_shutdown | 171 | timer.f = _wait_for_shutdown |
1217 | 168 | timer.start(interval=0.5, now=True) | 172 | timer.start(interval=0.5, now=True) |
1218 | 169 | return d | 173 | return d |
1219 | @@ -201,6 +205,7 @@ | |||
1220 | 201 | 205 | ||
1221 | 202 | d = defer.Deferred() | 206 | d = defer.Deferred() |
1222 | 203 | timer = task.LoopingCall(f=None) | 207 | timer = task.LoopingCall(f=None) |
1223 | 208 | |||
1224 | 204 | def _wait_for_reboot(): | 209 | def _wait_for_reboot(): |
1225 | 205 | try: | 210 | try: |
1226 | 206 | state = self.get_info(instance['name'])['state'] | 211 | state = self.get_info(instance['name'])['state'] |
1227 | @@ -217,6 +222,7 @@ | |||
1228 | 217 | power_state.SHUTDOWN) | 222 | power_state.SHUTDOWN) |
1229 | 218 | timer.stop() | 223 | timer.stop() |
1230 | 219 | d.callback(None) | 224 | d.callback(None) |
1231 | 225 | |||
1232 | 220 | timer.f = _wait_for_reboot | 226 | timer.f = _wait_for_reboot |
1233 | 221 | timer.start(interval=0.5, now=True) | 227 | timer.start(interval=0.5, now=True) |
1234 | 222 | yield d | 228 | yield d |
1235 | @@ -229,7 +235,8 @@ | |||
1236 | 229 | instance['id'], | 235 | instance['id'], |
1237 | 230 | power_state.NOSTATE, | 236 | power_state.NOSTATE, |
1238 | 231 | 'launching') | 237 | 'launching') |
1240 | 232 | yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) | 238 | yield NWFilterFirewall(self._conn).\ |
1241 | 239 | setup_nwfilters_for_instance(instance) | ||
1242 | 233 | yield self._create_image(instance, xml) | 240 | yield self._create_image(instance, xml) |
1243 | 234 | yield self._conn.createXML(xml, 0) | 241 | yield self._conn.createXML(xml, 0) |
1244 | 235 | # TODO(termie): this should actually register | 242 | # TODO(termie): this should actually register |
1245 | @@ -238,6 +245,7 @@ | |||
1246 | 238 | 245 | ||
1247 | 239 | local_d = defer.Deferred() | 246 | local_d = defer.Deferred() |
1248 | 240 | timer = task.LoopingCall(f=None) | 247 | timer = task.LoopingCall(f=None) |
1249 | 248 | |||
1250 | 241 | def _wait_for_boot(): | 249 | def _wait_for_boot(): |
1251 | 242 | try: | 250 | try: |
1252 | 243 | state = self.get_info(instance['name'])['state'] | 251 | state = self.get_info(instance['name'])['state'] |
1253 | @@ -265,8 +273,9 @@ | |||
1254 | 265 | 273 | ||
1255 | 266 | if virsh_output.startswith('/dev/'): | 274 | if virsh_output.startswith('/dev/'): |
1256 | 267 | logging.info('cool, it\'s a device') | 275 | logging.info('cool, it\'s a device') |
1259 | 268 | d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) | 276 | d = process.simple_execute("sudo dd if=%s iflag=nonblock" % |
1260 | 269 | d.addCallback(lambda r:r[0]) | 277 | virsh_output, check_exit_code=False) |
1261 | 278 | d.addCallback(lambda r: r[0]) | ||
1262 | 270 | return d | 279 | return d |
1263 | 271 | else: | 280 | else: |
1264 | 272 | return '' | 281 | return '' |
1265 | @@ -285,11 +294,15 @@ | |||
1266 | 285 | 294 | ||
1267 | 286 | @exception.wrap_exception | 295 | @exception.wrap_exception |
1268 | 287 | def get_console_output(self, instance): | 296 | def get_console_output(self, instance): |
1271 | 288 | console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') | 297 | console_log = os.path.join(FLAGS.instances_path, instance['name'], |
1272 | 289 | d = process.simple_execute('sudo chown %d %s' % (os.getuid(), console_log)) | 298 | 'console.log') |
1273 | 299 | d = process.simple_execute('sudo chown %d %s' % (os.getuid(), | ||
1274 | 300 | console_log)) | ||
1275 | 290 | if FLAGS.libvirt_type == 'xen': | 301 | if FLAGS.libvirt_type == 'xen': |
1276 | 291 | # Xen is spethial | 302 | # Xen is spethial |
1278 | 292 | d.addCallback(lambda _: process.simple_execute("virsh ttyconsole %s" % instance['name'])) | 303 | d.addCallback(lambda _: |
1279 | 304 | process.simple_execute("virsh ttyconsole %s" % | ||
1280 | 305 | instance['name'])) | ||
1281 | 293 | d.addCallback(self._flush_xen_console) | 306 | d.addCallback(self._flush_xen_console) |
1282 | 294 | d.addCallback(self._append_to_file, console_log) | 307 | d.addCallback(self._append_to_file, console_log) |
1283 | 295 | else: | 308 | else: |
1284 | @@ -297,7 +310,6 @@ | |||
1285 | 297 | d.addCallback(self._dump_file) | 310 | d.addCallback(self._dump_file) |
1286 | 298 | return d | 311 | return d |
1287 | 299 | 312 | ||
1288 | 300 | |||
1289 | 301 | @defer.inlineCallbacks | 313 | @defer.inlineCallbacks |
1290 | 302 | def _create_image(self, inst, libvirt_xml): | 314 | def _create_image(self, inst, libvirt_xml): |
1291 | 303 | # syntactic nicety | 315 | # syntactic nicety |
1292 | @@ -309,7 +321,6 @@ | |||
1293 | 309 | yield process.simple_execute('mkdir -p %s' % basepath()) | 321 | yield process.simple_execute('mkdir -p %s' % basepath()) |
1294 | 310 | yield process.simple_execute('chmod 0777 %s' % basepath()) | 322 | yield process.simple_execute('chmod 0777 %s' % basepath()) |
1295 | 311 | 323 | ||
1296 | 312 | |||
1297 | 313 | # TODO(termie): these are blocking calls, it would be great | 324 | # TODO(termie): these are blocking calls, it would be great |
1298 | 314 | # if they weren't. | 325 | # if they weren't. |
1299 | 315 | logging.info('instance %s: Creating image', inst['name']) | 326 | logging.info('instance %s: Creating image', inst['name']) |
1300 | @@ -317,17 +328,21 @@ | |||
1301 | 317 | f.write(libvirt_xml) | 328 | f.write(libvirt_xml) |
1302 | 318 | f.close() | 329 | f.close() |
1303 | 319 | 330 | ||
1305 | 320 | os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660)) | 331 | os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, |
1306 | 332 | 0660)) | ||
1307 | 321 | 333 | ||
1308 | 322 | user = manager.AuthManager().get_user(inst['user_id']) | 334 | user = manager.AuthManager().get_user(inst['user_id']) |
1309 | 323 | project = manager.AuthManager().get_project(inst['project_id']) | 335 | project = manager.AuthManager().get_project(inst['project_id']) |
1310 | 324 | 336 | ||
1311 | 325 | if not os.path.exists(basepath('disk')): | 337 | if not os.path.exists(basepath('disk')): |
1313 | 326 | yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) | 338 | yield images.fetch(inst.image_id, basepath('disk-raw'), user, |
1314 | 339 | project) | ||
1315 | 327 | if not os.path.exists(basepath('kernel')): | 340 | if not os.path.exists(basepath('kernel')): |
1317 | 328 | yield images.fetch(inst.kernel_id, basepath('kernel'), user, project) | 341 | yield images.fetch(inst.kernel_id, basepath('kernel'), user, |
1318 | 342 | project) | ||
1319 | 329 | if not os.path.exists(basepath('ramdisk')): | 343 | if not os.path.exists(basepath('ramdisk')): |
1321 | 330 | yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) | 344 | yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, |
1322 | 345 | project) | ||
1323 | 331 | 346 | ||
1324 | 332 | execute = lambda cmd, process_input=None, check_exit_code=True: \ | 347 | execute = lambda cmd, process_input=None, check_exit_code=True: \ |
1325 | 333 | process.simple_execute(cmd=cmd, | 348 | process.simple_execute(cmd=cmd, |
1326 | @@ -339,8 +354,8 @@ | |||
1327 | 339 | network_ref = db.network_get_by_instance(context.get_admin_context(), | 354 | network_ref = db.network_get_by_instance(context.get_admin_context(), |
1328 | 340 | inst['id']) | 355 | inst['id']) |
1329 | 341 | if network_ref['injected']: | 356 | if network_ref['injected']: |
1332 | 342 | address = db.instance_get_fixed_address(context.get_admin_context(), | 357 | admin_context = context.get_admin_context() |
1333 | 343 | inst['id']) | 358 | address = db.instance_get_fixed_address(admin_context, inst['id']) |
1334 | 344 | with open(FLAGS.injected_network_template) as f: | 359 | with open(FLAGS.injected_network_template) as f: |
1335 | 345 | net = f.read() % {'address': address, | 360 | net = f.read() % {'address': address, |
1336 | 346 | 'netmask': network_ref['netmask'], | 361 | 'netmask': network_ref['netmask'], |
1337 | @@ -354,7 +369,8 @@ | |||
1338 | 354 | if net: | 369 | if net: |
1339 | 355 | logging.info('instance %s: injecting net into image %s', | 370 | logging.info('instance %s: injecting net into image %s', |
1340 | 356 | inst['name'], inst.image_id) | 371 | inst['name'], inst.image_id) |
1342 | 357 | yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) | 372 | yield disk.inject_data(basepath('disk-raw'), key, net, |
1343 | 373 | execute=execute) | ||
1344 | 358 | 374 | ||
1345 | 359 | if os.path.exists(basepath('disk')): | 375 | if os.path.exists(basepath('disk')): |
1346 | 360 | yield process.simple_execute('rm -f %s' % basepath('disk')) | 376 | yield process.simple_execute('rm -f %s' % basepath('disk')) |
1347 | @@ -377,7 +393,8 @@ | |||
1348 | 377 | network = db.project_get_network(context.get_admin_context(), | 393 | network = db.project_get_network(context.get_admin_context(), |
1349 | 378 | instance['project_id']) | 394 | instance['project_id']) |
1350 | 379 | # FIXME(vish): stick this in db | 395 | # FIXME(vish): stick this in db |
1352 | 380 | instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']] | 396 | instance_type = instance['instance_type'] |
1353 | 397 | instance_type = instance_types.INSTANCE_TYPES[instance_type] | ||
1354 | 381 | ip_address = db.instance_get_fixed_address(context.get_admin_context(), | 398 | ip_address = db.instance_get_fixed_address(context.get_admin_context(), |
1355 | 382 | instance['id']) | 399 | instance['id']) |
1356 | 383 | # Assume that the gateway also acts as the dhcp server. | 400 | # Assume that the gateway also acts as the dhcp server. |
1357 | @@ -391,7 +408,7 @@ | |||
1358 | 391 | 'bridge_name': network['bridge'], | 408 | 'bridge_name': network['bridge'], |
1359 | 392 | 'mac_address': instance['mac_address'], | 409 | 'mac_address': instance['mac_address'], |
1360 | 393 | 'ip_address': ip_address, | 410 | 'ip_address': ip_address, |
1362 | 394 | 'dhcp_server': dhcp_server } | 411 | 'dhcp_server': dhcp_server} |
1363 | 395 | libvirt_xml = self.libvirt_xml % xml_info | 412 | libvirt_xml = self.libvirt_xml % xml_info |
1364 | 396 | logging.debug('instance %s: finished toXML method', instance['name']) | 413 | logging.debug('instance %s: finished toXML method', instance['name']) |
1365 | 397 | 414 | ||
1366 | @@ -506,7 +523,6 @@ | |||
1367 | 506 | domain = self._conn.lookupByName(instance_name) | 523 | domain = self._conn.lookupByName(instance_name) |
1368 | 507 | return domain.interfaceStats(interface) | 524 | return domain.interfaceStats(interface) |
1369 | 508 | 525 | ||
1370 | 509 | |||
1371 | 510 | def refresh_security_group(self, security_group_id): | 526 | def refresh_security_group(self, security_group_id): |
1372 | 511 | fw = NWFilterFirewall(self._conn) | 527 | fw = NWFilterFirewall(self._conn) |
1373 | 512 | fw.ensure_security_group_filter(security_group_id) | 528 | fw.ensure_security_group_filter(security_group_id) |
1374 | @@ -557,7 +573,6 @@ | |||
1375 | 557 | def __init__(self, get_connection): | 573 | def __init__(self, get_connection): |
1376 | 558 | self._conn = get_connection | 574 | self._conn = get_connection |
1377 | 559 | 575 | ||
1378 | 560 | |||
1379 | 561 | nova_base_filter = '''<filter name='nova-base' chain='root'> | 576 | nova_base_filter = '''<filter name='nova-base' chain='root'> |
1380 | 562 | <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid> | 577 | <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid> |
1381 | 563 | <filterref filter='no-mac-spoofing'/> | 578 | <filterref filter='no-mac-spoofing'/> |
1382 | @@ -578,7 +593,8 @@ | |||
1383 | 578 | srcportstart='68' | 593 | srcportstart='68' |
1384 | 579 | dstportstart='67'/> | 594 | dstportstart='67'/> |
1385 | 580 | </rule> | 595 | </rule> |
1387 | 581 | <rule action='accept' direction='in' priority='100'> | 596 | <rule action='accept' direction='in' |
1388 | 597 | priority='100'> | ||
1389 | 582 | <udp srcipaddr='$DHCPSERVER' | 598 | <udp srcipaddr='$DHCPSERVER' |
1390 | 583 | srcportstart='67' | 599 | srcportstart='67' |
1391 | 584 | dstportstart='68'/> | 600 | dstportstart='68'/> |
1392 | @@ -588,8 +604,8 @@ | |||
1393 | 588 | def nova_base_ipv4_filter(self): | 604 | def nova_base_ipv4_filter(self): |
1394 | 589 | retval = "<filter name='nova-base-ipv4' chain='ipv4'>" | 605 | retval = "<filter name='nova-base-ipv4' chain='ipv4'>" |
1395 | 590 | for protocol in ['tcp', 'udp', 'icmp']: | 606 | for protocol in ['tcp', 'udp', 'icmp']: |
1398 | 591 | for direction,action,priority in [('out','accept', 399), | 607 | for direction, action, priority in [('out', 'accept', 399), |
1399 | 592 | ('inout','drop', 400)]: | 608 | ('inout', 'drop', 400)]: |
1400 | 593 | retval += """<rule action='%s' direction='%s' priority='%d'> | 609 | retval += """<rule action='%s' direction='%s' priority='%d'> |
1401 | 594 | <%s /> | 610 | <%s /> |
1402 | 595 | </rule>""" % (action, direction, | 611 | </rule>""" % (action, direction, |
1403 | @@ -597,12 +613,11 @@ | |||
1404 | 597 | retval += '</filter>' | 613 | retval += '</filter>' |
1405 | 598 | return retval | 614 | return retval |
1406 | 599 | 615 | ||
1407 | 600 | |||
1408 | 601 | def nova_base_ipv6_filter(self): | 616 | def nova_base_ipv6_filter(self): |
1409 | 602 | retval = "<filter name='nova-base-ipv6' chain='ipv6'>" | 617 | retval = "<filter name='nova-base-ipv6' chain='ipv6'>" |
1410 | 603 | for protocol in ['tcp', 'udp', 'icmp']: | 618 | for protocol in ['tcp', 'udp', 'icmp']: |
1413 | 604 | for direction,action,priority in [('out','accept',399), | 619 | for direction, action, priority in [('out', 'accept', 399), |
1414 | 605 | ('inout','drop',400)]: | 620 | ('inout', 'drop', 400)]: |
1415 | 606 | retval += """<rule action='%s' direction='%s' priority='%d'> | 621 | retval += """<rule action='%s' direction='%s' priority='%d'> |
1416 | 607 | <%s-ipv6 /> | 622 | <%s-ipv6 /> |
1417 | 608 | </rule>""" % (action, direction, | 623 | </rule>""" % (action, direction, |
1418 | @@ -610,7 +625,6 @@ | |||
1419 | 610 | retval += '</filter>' | 625 | retval += '</filter>' |
1420 | 611 | return retval | 626 | return retval |
1421 | 612 | 627 | ||
1422 | 613 | |||
1423 | 614 | def nova_project_filter(self, project, net, mask): | 628 | def nova_project_filter(self, project, net, mask): |
1424 | 615 | retval = "<filter name='nova-project-%s' chain='ipv4'>" % project | 629 | retval = "<filter name='nova-project-%s' chain='ipv4'>" % project |
1425 | 616 | for protocol in ['tcp', 'udp', 'icmp']: | 630 | for protocol in ['tcp', 'udp', 'icmp']: |
1426 | @@ -620,14 +634,12 @@ | |||
1427 | 620 | retval += '</filter>' | 634 | retval += '</filter>' |
1428 | 621 | return retval | 635 | return retval |
1429 | 622 | 636 | ||
1430 | 623 | |||
1431 | 624 | def _define_filter(self, xml): | 637 | def _define_filter(self, xml): |
1432 | 625 | if callable(xml): | 638 | if callable(xml): |
1433 | 626 | xml = xml() | 639 | xml = xml() |
1434 | 627 | d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) | 640 | d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) |
1435 | 628 | return d | 641 | return d |
1436 | 629 | 642 | ||
1437 | 630 | |||
1438 | 631 | @staticmethod | 643 | @staticmethod |
1439 | 632 | def _get_net_and_mask(cidr): | 644 | def _get_net_and_mask(cidr): |
1440 | 633 | net = IPy.IP(cidr) | 645 | net = IPy.IP(cidr) |
1441 | @@ -646,9 +658,9 @@ | |||
1442 | 646 | yield self._define_filter(self.nova_dhcp_filter) | 658 | yield self._define_filter(self.nova_dhcp_filter) |
1443 | 647 | yield self._define_filter(self.nova_base_filter) | 659 | yield self._define_filter(self.nova_base_filter) |
1444 | 648 | 660 | ||
1448 | 649 | nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" + | 661 | nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \ |
1449 | 650 | " <filterref filter='nova-base' />\n" | 662 | " <filterref filter='nova-base' />\n" % \ |
1450 | 651 | ) % instance['name'] | 663 | instance['name'] |
1451 | 652 | 664 | ||
1452 | 653 | if FLAGS.allow_project_net_traffic: | 665 | if FLAGS.allow_project_net_traffic: |
1453 | 654 | network_ref = db.project_get_network(context.get_admin_context(), | 666 | network_ref = db.project_get_network(context.get_admin_context(), |
1454 | @@ -658,14 +670,14 @@ | |||
1455 | 658 | net, mask) | 670 | net, mask) |
1456 | 659 | yield self._define_filter(project_filter) | 671 | yield self._define_filter(project_filter) |
1457 | 660 | 672 | ||
1460 | 661 | nwfilter_xml += (" <filterref filter='nova-project-%s' />\n" | 673 | nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \ |
1461 | 662 | ) % instance['project_id'] | 674 | instance['project_id'] |
1462 | 663 | 675 | ||
1463 | 664 | for security_group in instance.security_groups: | 676 | for security_group in instance.security_groups: |
1464 | 665 | yield self.ensure_security_group_filter(security_group['id']) | 677 | yield self.ensure_security_group_filter(security_group['id']) |
1465 | 666 | 678 | ||
1468 | 667 | nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n" | 679 | nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \ |
1469 | 668 | ) % security_group['id'] | 680 | security_group['id'] |
1470 | 669 | nwfilter_xml += "</filter>" | 681 | nwfilter_xml += "</filter>" |
1471 | 670 | 682 | ||
1472 | 671 | yield self._define_filter(nwfilter_xml) | 683 | yield self._define_filter(nwfilter_xml) |
1473 | @@ -675,7 +687,6 @@ | |||
1474 | 675 | return self._define_filter( | 687 | return self._define_filter( |
1475 | 676 | self.security_group_to_nwfilter_xml(security_group_id)) | 688 | self.security_group_to_nwfilter_xml(security_group_id)) |
1476 | 677 | 689 | ||
1477 | 678 | |||
1478 | 679 | def security_group_to_nwfilter_xml(self, security_group_id): | 690 | def security_group_to_nwfilter_xml(self, security_group_id): |
1479 | 680 | security_group = db.security_group_get(context.get_admin_context(), | 691 | security_group = db.security_group_get(context.get_admin_context(), |
1480 | 681 | security_group_id) | 692 | security_group_id) |
1481 | @@ -684,12 +695,15 @@ | |||
1482 | 684 | rule_xml += "<rule action='accept' direction='in' priority='300'>" | 695 | rule_xml += "<rule action='accept' direction='in' priority='300'>" |
1483 | 685 | if rule.cidr: | 696 | if rule.cidr: |
1484 | 686 | net, mask = self._get_net_and_mask(rule.cidr) | 697 | net, mask = self._get_net_and_mask(rule.cidr) |
1486 | 687 | rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask) | 698 | rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
1487 | 699 | (rule.protocol, net, mask) | ||
1488 | 688 | if rule.protocol in ['tcp', 'udp']: | 700 | if rule.protocol in ['tcp', 'udp']: |
1489 | 689 | rule_xml += "dstportstart='%s' dstportend='%s' " % \ | 701 | rule_xml += "dstportstart='%s' dstportend='%s' " % \ |
1490 | 690 | (rule.from_port, rule.to_port) | 702 | (rule.from_port, rule.to_port) |
1491 | 691 | elif rule.protocol == 'icmp': | 703 | elif rule.protocol == 'icmp': |
1493 | 692 | logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port)) | 704 | logging.info('rule.protocol: %r, rule.from_port: %r, ' |
1494 | 705 | 'rule.to_port: %r' % | ||
1495 | 706 | (rule.protocol, rule.from_port, rule.to_port)) | ||
1496 | 693 | if rule.from_port != -1: | 707 | if rule.from_port != -1: |
1497 | 694 | rule_xml += "type='%s' " % rule.from_port | 708 | rule_xml += "type='%s' " % rule.from_port |
1498 | 695 | if rule.to_port != -1: | 709 | if rule.to_port != -1: |
1499 | @@ -697,5 +711,6 @@ | |||
1500 | 697 | 711 | ||
1501 | 698 | rule_xml += '/>\n' | 712 | rule_xml += '/>\n' |
1502 | 699 | rule_xml += "</rule>\n" | 713 | rule_xml += "</rule>\n" |
1504 | 700 | xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,) | 714 | xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \ |
1505 | 715 | (security_group_id, rule_xml,) | ||
1506 | 701 | return xml | 716 | return xml |
1507 | 702 | 717 | ||
1508 | === modified file 'nova/virt/xenapi.py' | |||
1509 | --- nova/virt/xenapi.py 2010-10-04 20:32:00 +0000 | |||
1510 | +++ nova/virt/xenapi.py 2010-10-22 00:19:41 +0000 | |||
1511 | @@ -75,12 +75,11 @@ | |||
1512 | 75 | 75 | ||
1513 | 76 | 76 | ||
1514 | 77 | XENAPI_POWER_STATE = { | 77 | XENAPI_POWER_STATE = { |
1521 | 78 | 'Halted' : power_state.SHUTDOWN, | 78 | 'Halted': power_state.SHUTDOWN, |
1522 | 79 | 'Running' : power_state.RUNNING, | 79 | 'Running': power_state.RUNNING, |
1523 | 80 | 'Paused' : power_state.PAUSED, | 80 | 'Paused': power_state.PAUSED, |
1524 | 81 | 'Suspended': power_state.SHUTDOWN, # FIXME | 81 | 'Suspended': power_state.SHUTDOWN, # FIXME |
1525 | 82 | 'Crashed' : power_state.CRASHED | 82 | 'Crashed': power_state.CRASHED} |
1520 | 83 | } | ||
1526 | 84 | 83 | ||
1527 | 85 | 84 | ||
1528 | 86 | def get_connection(_): | 85 | def get_connection(_): |
1529 | @@ -90,12 +89,15 @@ | |||
1530 | 90 | # library when not using XenAPI. | 89 | # library when not using XenAPI. |
1531 | 91 | global XenAPI | 90 | global XenAPI |
1532 | 92 | if XenAPI is None: | 91 | if XenAPI is None: |
1534 | 93 | XenAPI = __import__('XenAPI') | 92 | XenAPI = __import__('XenAPI') |
1535 | 94 | url = FLAGS.xenapi_connection_url | 93 | url = FLAGS.xenapi_connection_url |
1536 | 95 | username = FLAGS.xenapi_connection_username | 94 | username = FLAGS.xenapi_connection_username |
1537 | 96 | password = FLAGS.xenapi_connection_password | 95 | password = FLAGS.xenapi_connection_password |
1538 | 97 | if not url or password is None: | 96 | if not url or password is None: |
1540 | 98 | raise Exception('Must specify xenapi_connection_url, xenapi_connection_username (optionally), and xenapi_connection_password to use connection_type=xenapi') | 97 | raise Exception('Must specify xenapi_connection_url, ' |
1541 | 98 | 'xenapi_connection_username (optionally), and ' | ||
1542 | 99 | 'xenapi_connection_password to use ' | ||
1543 | 100 | 'connection_type=xenapi') | ||
1544 | 99 | return XenAPIConnection(url, username, password) | 101 | return XenAPIConnection(url, username, password) |
1545 | 100 | 102 | ||
1546 | 101 | 103 | ||
1547 | @@ -141,7 +143,7 @@ | |||
1548 | 141 | def _create_vm(self, instance, kernel, ramdisk): | 143 | def _create_vm(self, instance, kernel, ramdisk): |
1549 | 142 | """Create a VM record. Returns a Deferred that gives the new | 144 | """Create a VM record. Returns a Deferred that gives the new |
1550 | 143 | VM reference.""" | 145 | VM reference.""" |
1552 | 144 | 146 | ||
1553 | 145 | instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] | 147 | instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] |
1554 | 146 | mem = str(long(instance_type['memory_mb']) * 1024 * 1024) | 148 | mem = str(long(instance_type['memory_mb']) * 1024 * 1024) |
1555 | 147 | vcpus = str(instance_type['vcpus']) | 149 | vcpus = str(instance_type['vcpus']) |
1556 | @@ -183,7 +185,7 @@ | |||
1557 | 183 | def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): | 185 | def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): |
1558 | 184 | """Create a VBD record. Returns a Deferred that gives the new | 186 | """Create a VBD record. Returns a Deferred that gives the new |
1559 | 185 | VBD reference.""" | 187 | VBD reference.""" |
1561 | 186 | 188 | ||
1562 | 187 | vbd_rec = {} | 189 | vbd_rec = {} |
1563 | 188 | vbd_rec['VM'] = vm_ref | 190 | vbd_rec['VM'] = vm_ref |
1564 | 189 | vbd_rec['VDI'] = vdi_ref | 191 | vbd_rec['VDI'] = vdi_ref |
1565 | @@ -207,10 +209,10 @@ | |||
1566 | 207 | def _create_vif(self, vm_ref, network_ref, mac_address): | 209 | def _create_vif(self, vm_ref, network_ref, mac_address): |
1567 | 208 | """Create a VIF record. Returns a Deferred that gives the new | 210 | """Create a VIF record. Returns a Deferred that gives the new |
1568 | 209 | VIF reference.""" | 211 | VIF reference.""" |
1570 | 210 | 212 | ||
1571 | 211 | vif_rec = {} | 213 | vif_rec = {} |
1572 | 212 | vif_rec['device'] = '0' | 214 | vif_rec['device'] = '0' |
1574 | 213 | vif_rec['network']= network_ref | 215 | vif_rec['network'] = network_ref |
1575 | 214 | vif_rec['VM'] = vm_ref | 216 | vif_rec['VM'] = vm_ref |
1576 | 215 | vif_rec['MAC'] = mac_address | 217 | vif_rec['MAC'] = mac_address |
1577 | 216 | vif_rec['MTU'] = '1500' | 218 | vif_rec['MTU'] = '1500' |
1578 | @@ -303,7 +305,7 @@ | |||
1579 | 303 | 305 | ||
1580 | 304 | def _lookup_blocking(self, i): | 306 | def _lookup_blocking(self, i): |
1581 | 305 | vms = self._conn.xenapi.VM.get_by_name_label(i) | 307 | vms = self._conn.xenapi.VM.get_by_name_label(i) |
1583 | 306 | n = len(vms) | 308 | n = len(vms) |
1584 | 307 | if n == 0: | 309 | if n == 0: |
1585 | 308 | return None | 310 | return None |
1586 | 309 | elif n > 1: | 311 | elif n > 1: |
1587 | 310 | 312 | ||
1588 | === modified file 'nova/volume/driver.py' | |||
1589 | --- nova/volume/driver.py 2010-09-12 15:16:59 +0000 | |||
1590 | +++ nova/volume/driver.py 2010-10-22 00:19:41 +0000 | |||
1591 | @@ -61,7 +61,6 @@ | |||
1592 | 61 | "Try number %s", tries) | 61 | "Try number %s", tries) |
1593 | 62 | yield self._execute("sleep %s" % tries ** 2) | 62 | yield self._execute("sleep %s" % tries ** 2) |
1594 | 63 | 63 | ||
1595 | 64 | |||
1596 | 65 | @defer.inlineCallbacks | 64 | @defer.inlineCallbacks |
1597 | 66 | def create_volume(self, volume_name, size): | 65 | def create_volume(self, volume_name, size): |
1598 | 67 | """Creates a logical volume""" | 66 | """Creates a logical volume""" |
lgtm