Merge lp:~eday/nova/pep8-fixes-other into lp:~hudson-openstack/nova/trunk
- pep8-fixes-other
- Merge into trunk
Proposed by
Eric Day
Status: Merged
Approved by: Eric Day
Approved revision: 379
Merged at revision: 379
Proposed branch: lp:~eday/nova/pep8-fixes-other
Merge into: lp:~hudson-openstack/nova/trunk
Prerequisite: lp:~eday/nova/pep8-fixes-db
Diff against target: 1598 lines (+274/-236), 24 files modified
nova/auth/dbdriver.py (+27/-24) nova/auth/fakeldap.py (+5/-6) nova/auth/ldapdriver.py (+7/-5) nova/auth/manager.py (+1/-1) nova/cloudpipe/pipelib.py (+19/-11) nova/compute/disk.py (+7/-8) nova/compute/monitor.py (+52/-61) nova/compute/power_state.py (+6/-7) nova/image/service.py (+9/-8) nova/image/services/glance/__init__.py (+4/-4) nova/network/linux_net.py (+7/-3) nova/network/manager.py (+2/-3) nova/objectstore/bucket.py (+12/-8) nova/objectstore/handler.py (+12/-9) nova/objectstore/image.py (+23/-18) nova/objectstore/stored.py (+2/-2) nova/scheduler/driver.py (+2/-0) nova/scheduler/manager.py (+2/-1) nova/scheduler/simple.py (+1/-0) nova/virt/fake.py (+1/-0) nova/virt/images.py (+2/-2) nova/virt/libvirt_conn.py (+56/-41) nova/virt/xenapi.py (+15/-13) nova/volume/driver.py (+0/-1)
To merge this branch: bzr merge lp:~eday/nova/pep8-fixes-other
Related bugs: (none)
Reviewers:
Jay Pipes (community): Approve
Vish Ishaya (community): Approve
Review via email:
Commit message
Description of the change
Another pep8 cleanup branch for nova/*, should be merged after lp:~eday/nova/pep8-fixes-db.
To post a comment you must log in.
Revision history for this message
Jay Pipes (jaypipes) wrote:
194 -import string # pylint: disable-msg=W0402
195 +import string # pylint: disable-msg=W0402
114 -SCOPE_ONELEVEL = 1 # not implemented
115 +SCOPE_ONELEVEL = 1 # Not implemented
Not sure what the "fix" is for the above comments... is there a PEP8 requirement for >1 space between a code line and a comment?
Other than that, looks good.
review:
Needs Information
Revision history for this message
Eric Day (eday) wrote:
Yup, you get a message like:
bin/nova- [remainder of message truncated in the archived page]
Revision history for this message
Jay Pipes (jaypipes) wrote:
Heh, interesting. :)
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'nova/auth/dbdriver.py' |
2 | --- nova/auth/dbdriver.py 2010-10-14 05:18:01 +0000 |
3 | +++ nova/auth/dbdriver.py 2010-10-22 00:19:41 +0000 |
4 | @@ -47,19 +47,23 @@ |
5 | |
6 | def get_user(self, uid): |
7 | """Retrieve user by id""" |
8 | - return self._db_user_to_auth_user(db.user_get(context.get_admin_context(), uid)) |
9 | + user = db.user_get(context.get_admin_context(), uid) |
10 | + return self._db_user_to_auth_user(user) |
11 | |
12 | def get_user_from_access_key(self, access): |
13 | """Retrieve user by access key""" |
14 | - return self._db_user_to_auth_user(db.user_get_by_access_key(context.get_admin_context(), access)) |
15 | + user = db.user_get_by_access_key(context.get_admin_context(), access) |
16 | + return self._db_user_to_auth_user(user) |
17 | |
18 | def get_project(self, pid): |
19 | """Retrieve project by id""" |
20 | - return self._db_project_to_auth_projectuser(db.project_get(context.get_admin_context(), pid)) |
21 | + project = db.project_get(context.get_admin_context(), pid) |
22 | + return self._db_project_to_auth_projectuser(project) |
23 | |
24 | def get_users(self): |
25 | """Retrieve list of users""" |
26 | - return [self._db_user_to_auth_user(user) for user in db.user_get_all(context.get_admin_context())] |
27 | + return [self._db_user_to_auth_user(user) |
28 | + for user in db.user_get_all(context.get_admin_context())] |
29 | |
30 | def get_projects(self, uid=None): |
31 | """Retrieve list of projects""" |
32 | @@ -71,11 +75,10 @@ |
33 | |
34 | def create_user(self, name, access_key, secret_key, is_admin): |
35 | """Create a user""" |
36 | - values = { 'id' : name, |
37 | - 'access_key' : access_key, |
38 | - 'secret_key' : secret_key, |
39 | - 'is_admin' : is_admin |
40 | - } |
41 | + values = {'id': name, |
42 | + 'access_key': access_key, |
43 | + 'secret_key': secret_key, |
44 | + 'is_admin': is_admin} |
45 | try: |
46 | user_ref = db.user_create(context.get_admin_context(), values) |
47 | return self._db_user_to_auth_user(user_ref) |
48 | @@ -83,18 +86,19 @@ |
49 | raise exception.Duplicate('User %s already exists' % name) |
50 | |
51 | def _db_user_to_auth_user(self, user_ref): |
52 | - return { 'id' : user_ref['id'], |
53 | - 'name' : user_ref['id'], |
54 | - 'access' : user_ref['access_key'], |
55 | - 'secret' : user_ref['secret_key'], |
56 | - 'admin' : user_ref['is_admin'] } |
57 | + return {'id': user_ref['id'], |
58 | + 'name': user_ref['id'], |
59 | + 'access': user_ref['access_key'], |
60 | + 'secret': user_ref['secret_key'], |
61 | + 'admin': user_ref['is_admin']} |
62 | |
63 | def _db_project_to_auth_projectuser(self, project_ref): |
64 | - return { 'id' : project_ref['id'], |
65 | - 'name' : project_ref['name'], |
66 | - 'project_manager_id' : project_ref['project_manager'], |
67 | - 'description' : project_ref['description'], |
68 | - 'member_ids' : [member['id'] for member in project_ref['members']] } |
69 | + member_ids = [member['id'] for member in project_ref['members']] |
70 | + return {'id': project_ref['id'], |
71 | + 'name': project_ref['name'], |
72 | + 'project_manager_id': project_ref['project_manager'], |
73 | + 'description': project_ref['description'], |
74 | + 'member_ids': member_ids} |
75 | |
76 | def create_project(self, name, manager_uid, |
77 | description=None, member_uids=None): |
78 | @@ -121,10 +125,10 @@ |
79 | % member_uid) |
80 | members.add(member) |
81 | |
82 | - values = { 'id' : name, |
83 | - 'name' : name, |
84 | - 'project_manager' : manager['id'], |
85 | - 'description': description } |
86 | + values = {'id': name, |
87 | + 'name': name, |
88 | + 'project_manager': manager['id'], |
89 | + 'description': description} |
90 | |
91 | try: |
92 | project = db.project_create(context.get_admin_context(), values) |
93 | @@ -244,4 +248,3 @@ |
94 | if not project: |
95 | raise exception.NotFound('Project "%s" not found' % project_id) |
96 | return user, project |
97 | - |
98 | |
99 | === modified file 'nova/auth/fakeldap.py' |
100 | --- nova/auth/fakeldap.py 2010-10-14 13:07:37 +0000 |
101 | +++ nova/auth/fakeldap.py 2010-10-22 00:19:41 +0000 |
102 | @@ -35,6 +35,7 @@ |
103 | 'Port that redis is running on.') |
104 | flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') |
105 | |
106 | + |
107 | class Redis(object): |
108 | def __init__(self): |
109 | if hasattr(self.__class__, '_instance'): |
110 | @@ -51,19 +52,19 @@ |
111 | |
112 | |
113 | SCOPE_BASE = 0 |
114 | -SCOPE_ONELEVEL = 1 # not implemented |
115 | +SCOPE_ONELEVEL = 1 # Not implemented |
116 | SCOPE_SUBTREE = 2 |
117 | MOD_ADD = 0 |
118 | MOD_DELETE = 1 |
119 | MOD_REPLACE = 2 |
120 | |
121 | |
122 | -class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 |
123 | +class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 |
124 | """Duplicate exception class from real LDAP module.""" |
125 | pass |
126 | |
127 | |
128 | -class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 |
129 | +class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 |
130 | """Duplicate exception class from real LDAP module.""" |
131 | pass |
132 | |
133 | @@ -251,8 +252,6 @@ |
134 | return objects |
135 | |
136 | @property |
137 | - def __redis_prefix(self): # pylint: disable-msg=R0201 |
138 | + def __redis_prefix(self): # pylint: disable-msg=R0201 |
139 | """Get the prefix to use for all redis keys.""" |
140 | return 'ldap:' |
141 | - |
142 | - |
143 | |
144 | === modified file 'nova/auth/ldapdriver.py' |
145 | --- nova/auth/ldapdriver.py 2010-09-25 03:32:00 +0000 |
146 | +++ nova/auth/ldapdriver.py 2010-10-22 00:19:41 +0000 |
147 | @@ -294,24 +294,26 @@ |
148 | |
149 | def __find_dns(self, dn, query=None, scope=None): |
150 | """Find dns by query""" |
151 | - if scope is None: # one of the flags is 0!! |
152 | + if scope is None: |
153 | + # One of the flags is 0! |
154 | scope = self.ldap.SCOPE_SUBTREE |
155 | try: |
156 | res = self.conn.search_s(dn, scope, query) |
157 | except self.ldap.NO_SUCH_OBJECT: |
158 | return [] |
159 | - # just return the DNs |
160 | + # Just return the DNs |
161 | return [dn for dn, _attributes in res] |
162 | |
163 | def __find_objects(self, dn, query=None, scope=None): |
164 | """Find objects by query""" |
165 | - if scope is None: # one of the flags is 0!! |
166 | + if scope is None: |
167 | + # One of the flags is 0! |
168 | scope = self.ldap.SCOPE_SUBTREE |
169 | try: |
170 | res = self.conn.search_s(dn, scope, query) |
171 | except self.ldap.NO_SUCH_OBJECT: |
172 | return [] |
173 | - # just return the attributes |
174 | + # Just return the attributes |
175 | return [attributes for dn, attributes in res] |
176 | |
177 | def __find_role_dns(self, tree): |
178 | @@ -480,6 +482,6 @@ |
179 | class FakeLdapDriver(LdapDriver): |
180 | """Fake Ldap Auth driver""" |
181 | |
182 | - def __init__(self): # pylint: disable-msg=W0231 |
183 | + def __init__(self): # pylint: disable-msg=W0231 |
184 | __import__('nova.auth.fakeldap') |
185 | self.ldap = sys.modules['nova.auth.fakeldap'] |
186 | |
187 | === modified file 'nova/auth/manager.py' |
188 | --- nova/auth/manager.py 2010-10-15 15:18:40 +0000 |
189 | +++ nova/auth/manager.py 2010-10-22 00:19:41 +0000 |
190 | @@ -23,7 +23,7 @@ |
191 | import logging |
192 | import os |
193 | import shutil |
194 | -import string # pylint: disable-msg=W0402 |
195 | +import string # pylint: disable-msg=W0402 |
196 | import tempfile |
197 | import uuid |
198 | import zipfile |
199 | |
200 | === modified file 'nova/cloudpipe/pipelib.py' |
201 | --- nova/cloudpipe/pipelib.py 2010-10-01 12:57:17 +0000 |
202 | +++ nova/cloudpipe/pipelib.py 2010-10-22 00:19:41 +0000 |
203 | @@ -49,7 +49,7 @@ |
204 | self.manager = manager.AuthManager() |
205 | |
206 | def launch_vpn_instance(self, project_id): |
207 | - logging.debug( "Launching VPN for %s" % (project_id)) |
208 | + logging.debug("Launching VPN for %s" % (project_id)) |
209 | project = self.manager.get_project(project_id) |
210 | # Make a payload.zip |
211 | tmpfolder = tempfile.mkdtemp() |
212 | @@ -57,16 +57,18 @@ |
213 | zippath = os.path.join(tmpfolder, filename) |
214 | z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED) |
215 | |
216 | - z.write(FLAGS.boot_script_template,'autorun.sh') |
217 | + z.write(FLAGS.boot_script_template, 'autorun.sh') |
218 | z.close() |
219 | |
220 | key_name = self.setup_key_pair(project.project_manager_id, project_id) |
221 | zippy = open(zippath, "r") |
222 | - context = context.RequestContext(user=project.project_manager, project=project) |
223 | + context = context.RequestContext(user=project.project_manager, |
224 | + project=project) |
225 | |
226 | reservation = self.controller.run_instances(context, |
227 | - # run instances expects encoded userdata, it is decoded in the get_metadata_call |
228 | - # autorun.sh also decodes the zip file, hence the double encoding |
229 | + # Run instances expects encoded userdata, it is decoded in the |
230 | + # get_metadata_call. autorun.sh also decodes the zip file, hence |
231 | + # the double encoding. |
232 | user_data=zippy.read().encode("base64").encode("base64"), |
233 | max_count=1, |
234 | min_count=1, |
235 | @@ -79,12 +81,14 @@ |
236 | def setup_key_pair(self, user_id, project_id): |
237 | key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix) |
238 | try: |
239 | - private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name) |
240 | + private_key, fingerprint = self.manager.generate_key_pair(user_id, |
241 | + key_name) |
242 | try: |
243 | key_dir = os.path.join(FLAGS.keys_path, user_id) |
244 | if not os.path.exists(key_dir): |
245 | os.makedirs(key_dir) |
246 | - with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f: |
247 | + file_name = os.path.join(key_dir, '%s.pem' % key_name) |
248 | + with open(file_name, 'w') as f: |
249 | f.write(private_key) |
250 | except: |
251 | pass |
252 | @@ -95,9 +99,13 @@ |
253 | # def setup_secgroups(self, username): |
254 | # conn = self.euca.connection_for(username) |
255 | # try: |
256 | - # secgroup = conn.create_security_group("vpn-secgroup", "vpn-secgroup") |
257 | - # secgroup.authorize(ip_protocol = "udp", from_port = "1194", to_port = "1194", cidr_ip = "0.0.0.0/0") |
258 | - # secgroup.authorize(ip_protocol = "tcp", from_port = "80", to_port = "80", cidr_ip = "0.0.0.0/0") |
259 | - # secgroup.authorize(ip_protocol = "tcp", from_port = "22", to_port = "22", cidr_ip = "0.0.0.0/0") |
260 | + # secgroup = conn.create_security_group("vpn-secgroup", |
261 | + # "vpn-secgroup") |
262 | + # secgroup.authorize(ip_protocol = "udp", from_port = "1194", |
263 | + # to_port = "1194", cidr_ip = "0.0.0.0/0") |
264 | + # secgroup.authorize(ip_protocol = "tcp", from_port = "80", |
265 | + # to_port = "80", cidr_ip = "0.0.0.0/0") |
266 | + # secgroup.authorize(ip_protocol = "tcp", from_port = "22", |
267 | + # to_port = "22", cidr_ip = "0.0.0.0/0") |
268 | # except: |
269 | # pass |
270 | |
271 | === modified file 'nova/compute/disk.py' |
272 | --- nova/compute/disk.py 2010-10-18 20:40:03 +0000 |
273 | +++ nova/compute/disk.py 2010-10-22 00:19:41 +0000 |
274 | @@ -72,12 +72,12 @@ |
275 | " by sector size: %d / %d", local_bytes, sector_size) |
276 | local_sectors = local_bytes / sector_size |
277 | |
278 | - mbr_last = 62 # a |
279 | - primary_first = mbr_last + 1 # b |
280 | - primary_last = primary_first + primary_sectors - 1 # c |
281 | - local_first = primary_last + 1 # d |
282 | - local_last = local_first + local_sectors - 1 # e |
283 | - last_sector = local_last # e |
284 | + mbr_last = 62 # a |
285 | + primary_first = mbr_last + 1 # b |
286 | + primary_last = primary_first + primary_sectors - 1 # c |
287 | + local_first = primary_last + 1 # d |
288 | + local_last = local_first + local_sectors - 1 # e |
289 | + last_sector = local_last # e |
290 | |
291 | # create an empty file |
292 | yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' |
293 | @@ -157,7 +157,7 @@ |
294 | @defer.inlineCallbacks |
295 | def _inject_key_into_fs(key, fs, execute=None): |
296 | sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') |
297 | - yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter |
298 | + yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter |
299 | yield execute('sudo chown root %s' % sshdir) |
300 | yield execute('sudo chmod 700 %s' % sshdir) |
301 | keyfile = os.path.join(sshdir, 'authorized_keys') |
302 | @@ -169,4 +169,3 @@ |
303 | netfile = os.path.join(os.path.join(os.path.join( |
304 | fs, 'etc'), 'network'), 'interfaces') |
305 | yield execute('sudo tee %s' % netfile, net) |
306 | - |
307 | |
308 | === modified file 'nova/compute/monitor.py' |
309 | --- nova/compute/monitor.py 2010-08-16 12:16:21 +0000 |
310 | +++ nova/compute/monitor.py 2010-10-22 00:19:41 +0000 |
311 | @@ -85,8 +85,7 @@ |
312 | 'RRA:MAX:0.5:6:800', |
313 | 'RRA:MAX:0.5:24:800', |
314 | 'RRA:MAX:0.5:444:800', |
315 | - ] |
316 | - } |
317 | + ]} |
318 | |
319 | |
320 | utcnow = datetime.datetime.utcnow |
321 | @@ -97,15 +96,12 @@ |
322 | Updates the specified RRD file. |
323 | """ |
324 | filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name) |
325 | - |
326 | + |
327 | if not os.path.exists(filename): |
328 | init_rrd(instance, name) |
329 | - |
330 | + |
331 | timestamp = int(time.mktime(utcnow().timetuple())) |
332 | - rrdtool.update ( |
333 | - filename, |
334 | - '%d:%s' % (timestamp, data) |
335 | - ) |
336 | + rrdtool.update(filename, '%d:%s' % (timestamp, data)) |
337 | |
338 | |
339 | def init_rrd(instance, name): |
340 | @@ -113,29 +109,28 @@ |
341 | Initializes the specified RRD file. |
342 | """ |
343 | path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id) |
344 | - |
345 | + |
346 | if not os.path.exists(path): |
347 | os.makedirs(path) |
348 | - |
349 | + |
350 | filename = os.path.join(path, '%s.rrd' % name) |
351 | - |
352 | + |
353 | if not os.path.exists(filename): |
354 | - rrdtool.create ( |
355 | + rrdtool.create( |
356 | filename, |
357 | '--step', '%d' % FLAGS.monitoring_instances_step, |
358 | '--start', '0', |
359 | - *RRD_VALUES[name] |
360 | - ) |
361 | - |
362 | - |
363 | + *RRD_VALUES[name]) |
364 | + |
365 | + |
366 | def graph_cpu(instance, duration): |
367 | """ |
368 | Creates a graph of cpu usage for the specified instance and duration. |
369 | """ |
370 | path = instance.get_rrd_path() |
371 | filename = os.path.join(path, 'cpu-%s.png' % duration) |
372 | - |
373 | - rrdtool.graph ( |
374 | + |
375 | + rrdtool.graph( |
376 | filename, |
377 | '--disable-rrdtool-tag', |
378 | '--imgformat', 'PNG', |
379 | @@ -146,9 +141,8 @@ |
380 | '-l', '0', |
381 | '-u', '100', |
382 | 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'), |
383 | - 'AREA:cpu#eacc00:% CPU', |
384 | - ) |
385 | - |
386 | + 'AREA:cpu#eacc00:% CPU',) |
387 | + |
388 | store_graph(instance.instance_id, filename) |
389 | |
390 | |
391 | @@ -158,8 +152,8 @@ |
392 | """ |
393 | path = instance.get_rrd_path() |
394 | filename = os.path.join(path, 'net-%s.png' % duration) |
395 | - |
396 | - rrdtool.graph ( |
397 | + |
398 | + rrdtool.graph( |
399 | filename, |
400 | '--disable-rrdtool-tag', |
401 | '--imgformat', 'PNG', |
402 | @@ -174,20 +168,19 @@ |
403 | 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'), |
404 | 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'), |
405 | 'AREA:rx#00FF00:In traffic', |
406 | - 'LINE1:tx#0000FF:Out traffic', |
407 | - ) |
408 | - |
409 | + 'LINE1:tx#0000FF:Out traffic',) |
410 | + |
411 | store_graph(instance.instance_id, filename) |
412 | |
413 | - |
414 | + |
415 | def graph_disk(instance, duration): |
416 | """ |
417 | Creates a graph of disk usage for the specified duration. |
418 | - """ |
419 | + """ |
420 | path = instance.get_rrd_path() |
421 | filename = os.path.join(path, 'disk-%s.png' % duration) |
422 | - |
423 | - rrdtool.graph ( |
424 | + |
425 | + rrdtool.graph( |
426 | filename, |
427 | '--disable-rrdtool-tag', |
428 | '--imgformat', 'PNG', |
429 | @@ -202,9 +195,8 @@ |
430 | 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'), |
431 | 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'), |
432 | 'AREA:rd#00FF00:Read', |
433 | - 'LINE1:wr#0000FF:Write', |
434 | - ) |
435 | - |
436 | + 'LINE1:wr#0000FF:Write',) |
437 | + |
438 | store_graph(instance.instance_id, filename) |
439 | |
440 | |
441 | @@ -224,17 +216,16 @@ |
442 | is_secure=False, |
443 | calling_format=boto.s3.connection.OrdinaryCallingFormat(), |
444 | port=FLAGS.s3_port, |
445 | - host=FLAGS.s3_host |
446 | - ) |
447 | + host=FLAGS.s3_host) |
448 | bucket_name = '_%s.monitor' % instance_id |
449 | - |
450 | + |
451 | # Object store isn't creating the bucket like it should currently |
452 | # when it is first requested, so have to catch and create manually. |
453 | try: |
454 | bucket = s3.get_bucket(bucket_name) |
455 | except Exception: |
456 | bucket = s3.create_bucket(bucket_name) |
457 | - |
458 | + |
459 | key = boto.s3.Key(bucket) |
460 | key.key = os.path.basename(filename) |
461 | key.set_contents_from_filename(filename) |
462 | @@ -247,18 +238,18 @@ |
463 | self.last_updated = datetime.datetime.min |
464 | self.cputime = 0 |
465 | self.cputime_last_updated = None |
466 | - |
467 | + |
468 | init_rrd(self, 'cpu') |
469 | init_rrd(self, 'net') |
470 | init_rrd(self, 'disk') |
471 | - |
472 | + |
473 | def needs_update(self): |
474 | """ |
475 | Indicates whether this instance is due to have its statistics updated. |
476 | """ |
477 | delta = utcnow() - self.last_updated |
478 | return delta.seconds >= FLAGS.monitoring_instances_step |
479 | - |
480 | + |
481 | def update(self): |
482 | """ |
483 | Updates the instances statistics and stores the resulting graphs |
484 | @@ -271,7 +262,7 @@ |
485 | if data != None: |
486 | logging.debug('CPU: %s', data) |
487 | update_rrd(self, 'cpu', data) |
488 | - |
489 | + |
490 | data = self.fetch_net_stats() |
491 | logging.debug('NET: %s', data) |
492 | update_rrd(self, 'net', data) |
493 | @@ -279,7 +270,7 @@ |
494 | data = self.fetch_disk_stats() |
495 | logging.debug('DISK: %s', data) |
496 | update_rrd(self, 'disk', data) |
497 | - |
498 | + |
499 | # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls |
500 | # and make the methods @defer.inlineCallbacks. |
501 | graph_cpu(self, '1d') |
502 | @@ -297,13 +288,13 @@ |
503 | logging.exception('unexpected error during update') |
504 | |
505 | self.last_updated = utcnow() |
506 | - |
507 | + |
508 | def get_rrd_path(self): |
509 | """ |
510 | Returns the path to where RRD files are stored. |
511 | """ |
512 | return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id) |
513 | - |
514 | + |
515 | def fetch_cpu_stats(self): |
516 | """ |
517 | Returns cpu usage statistics for this instance. |
518 | @@ -327,17 +318,17 @@ |
519 | # Calculate the number of seconds between samples. |
520 | d = self.cputime_last_updated - cputime_last_updated |
521 | t = d.days * 86400 + d.seconds |
522 | - |
523 | + |
524 | logging.debug('t = %d', t) |
525 | |
526 | # Calculate change over time in number of nanoseconds of CPU time used. |
527 | cputime_delta = self.cputime - cputime_last |
528 | - |
529 | + |
530 | logging.debug('cputime_delta = %s', cputime_delta) |
531 | |
532 | # Get the number of virtual cpus in this domain. |
533 | vcpus = int(info['num_cpu']) |
534 | - |
535 | + |
536 | logging.debug('vcpus = %d', vcpus) |
537 | |
538 | # Calculate CPU % used and cap at 100. |
539 | @@ -349,9 +340,9 @@ |
540 | """ |
541 | rd = 0 |
542 | wr = 0 |
543 | - |
544 | + |
545 | disks = self.conn.get_disks(self.instance_id) |
546 | - |
547 | + |
548 | # Aggregate the read and write totals. |
549 | for disk in disks: |
550 | try: |
551 | @@ -363,7 +354,7 @@ |
552 | logging.error('Cannot get blockstats for "%s" on "%s"', |
553 | disk, self.instance_id) |
554 | raise |
555 | - |
556 | + |
557 | return '%d:%d' % (rd, wr) |
558 | |
559 | def fetch_net_stats(self): |
560 | @@ -372,9 +363,9 @@ |
561 | """ |
562 | rx = 0 |
563 | tx = 0 |
564 | - |
565 | + |
566 | interfaces = self.conn.get_interfaces(self.instance_id) |
567 | - |
568 | + |
569 | # Aggregate the in and out totals. |
570 | for interface in interfaces: |
571 | try: |
572 | @@ -385,7 +376,7 @@ |
573 | logging.error('Cannot get ifstats for "%s" on "%s"', |
574 | interface, self.instance_id) |
575 | raise |
576 | - |
577 | + |
578 | return '%d:%d' % (rx, tx) |
579 | |
580 | |
581 | @@ -400,16 +391,16 @@ |
582 | """ |
583 | self._instances = {} |
584 | self._loop = task.LoopingCall(self.updateInstances) |
585 | - |
586 | + |
587 | def startService(self): |
588 | self._instances = {} |
589 | self._loop.start(interval=FLAGS.monitoring_instances_delay) |
590 | service.Service.startService(self) |
591 | - |
592 | + |
593 | def stopService(self): |
594 | self._loop.stop() |
595 | service.Service.stopService(self) |
596 | - |
597 | + |
598 | def updateInstances(self): |
599 | """ |
600 | Update resource usage for all running instances. |
601 | @@ -420,20 +411,20 @@ |
602 | logging.exception('unexpected exception getting connection') |
603 | time.sleep(FLAGS.monitoring_instances_delay) |
604 | return |
605 | - |
606 | + |
607 | domain_ids = conn.list_instances() |
608 | try: |
609 | - self.updateInstances_(conn, domain_ids) |
610 | + self.updateInstances_(conn, domain_ids) |
611 | except Exception, exn: |
612 | - logging.exception('updateInstances_') |
613 | + logging.exception('updateInstances_') |
614 | |
615 | def updateInstances_(self, conn, domain_ids): |
616 | for domain_id in domain_ids: |
617 | - if not domain_id in self._instances: |
618 | + if not domain_id in self._instances: |
619 | instance = Instance(conn, domain_id) |
620 | self._instances[domain_id] = instance |
621 | logging.debug('Found instance: %s', domain_id) |
622 | - |
623 | + |
624 | for key in self._instances.keys(): |
625 | instance = self._instances[key] |
626 | if instance.needs_update(): |
627 | |
628 | === modified file 'nova/compute/power_state.py' |
629 | --- nova/compute/power_state.py 2010-07-18 17:15:12 +0000 |
630 | +++ nova/compute/power_state.py 2010-10-22 00:19:41 +0000 |
631 | @@ -30,12 +30,11 @@ |
632 | |
633 | def name(code): |
634 | d = { |
635 | - NOSTATE : 'pending', |
636 | - RUNNING : 'running', |
637 | - BLOCKED : 'blocked', |
638 | - PAUSED : 'paused', |
639 | + NOSTATE: 'pending', |
640 | + RUNNING: 'running', |
641 | + BLOCKED: 'blocked', |
642 | + PAUSED: 'paused', |
643 | SHUTDOWN: 'shutdown', |
644 | - SHUTOFF : 'shutdown', |
645 | - CRASHED : 'crashed', |
646 | - } |
647 | + SHUTOFF: 'shutdown', |
648 | + CRASHED: 'crashed'} |
649 | return d[code] |
650 | |
651 | === modified file 'nova/image/service.py' |
652 | --- nova/image/service.py 2010-10-15 20:24:02 +0000 |
653 | +++ nova/image/service.py 2010-10-22 00:19:41 +0000 |
654 | @@ -30,7 +30,8 @@ |
655 | flags.DEFINE_string('glance_teller_port', '9191', |
656 | 'Port for Glance\'s Teller service') |
657 | flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1', |
658 | - 'IP address or URL where Glance\'s Parallax service resides') |
659 | + 'IP address or URL where Glance\'s Parallax service ' |
660 | + 'resides') |
661 | flags.DEFINE_string('glance_parallax_port', '9292', |
662 | 'Port for Glance\'s Parallax service') |
663 | |
664 | @@ -120,10 +121,10 @@ |
665 | |
666 | def delete(self, image_id): |
667 | """ |
668 | - Delete the given image. |
669 | - |
670 | + Delete the given image. |
671 | + |
672 | :raises NotFound if the image does not exist. |
673 | - |
674 | + |
675 | """ |
676 | raise NotImplementedError |
677 | |
678 | @@ -131,14 +132,14 @@ |
679 | class LocalImageService(BaseImageService): |
680 | |
681 | """Image service storing images to local disk. |
682 | - |
683 | + |
684 | It assumes that image_ids are integers.""" |
685 | |
686 | def __init__(self): |
687 | self._path = "/tmp/nova/images" |
688 | try: |
689 | os.makedirs(self._path) |
690 | - except OSError: # exists |
691 | + except OSError: # Exists |
692 | pass |
693 | |
694 | def _path_to(self, image_id): |
695 | @@ -156,7 +157,7 @@ |
696 | |
697 | def show(self, id): |
698 | try: |
699 | - return pickle.load(open(self._path_to(id))) |
700 | + return pickle.load(open(self._path_to(id))) |
701 | except IOError: |
702 | raise exception.NotFound |
703 | |
704 | @@ -164,7 +165,7 @@ |
705 | """ |
706 | Store the image data and return the new image id. |
707 | """ |
708 | - id = random.randint(0, 2**32-1) |
709 | + id = random.randint(0, 2 ** 32 - 1) |
710 | data['id'] = id |
711 | self.update(id, data) |
712 | return id |
713 | |
714 | === modified file 'nova/image/services/glance/__init__.py' |
715 | --- nova/image/services/glance/__init__.py 2010-10-15 20:24:02 +0000 |
716 | +++ nova/image/services/glance/__init__.py 2010-10-22 00:19:41 +0000 |
717 | @@ -30,6 +30,7 @@ |
718 | |
719 | FLAGS = flags.FLAGS |
720 | |
721 | + |
722 | class TellerClient(object): |
723 | |
724 | def __init__(self): |
725 | @@ -153,7 +154,6 @@ |
726 | |
727 | |
728 | class GlanceImageService(nova.image.service.BaseImageService): |
729 | - |
730 | """Provides storage and retrieval of disk image objects within Glance.""" |
731 | |
732 | def __init__(self): |
733 | @@ -202,10 +202,10 @@ |
734 | |
735 | def delete(self, image_id): |
736 | """ |
737 | - Delete the given image. |
738 | - |
739 | + Delete the given image. |
740 | + |
741 | :raises NotFound if the image does not exist. |
742 | - |
743 | + |
744 | """ |
745 | self.parallax.delete_image_metadata(image_id) |
746 | |
747 | |
748 | === modified file 'nova/network/linux_net.py' |
749 | --- nova/network/linux_net.py 2010-10-20 20:54:53 +0000 |
750 | +++ nova/network/linux_net.py 2010-10-22 00:19:41 +0000 |
751 | @@ -53,6 +53,7 @@ |
752 | |
753 | DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] |
754 | |
755 | + |
756 | def init_host(): |
757 | """Basic networking setup goes here""" |
758 | # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port |
759 | @@ -72,6 +73,7 @@ |
760 | _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % |
761 | {'range': FLAGS.fixed_range}) |
762 | |
763 | + |
764 | def bind_floating_ip(floating_ip): |
765 | """Bind ip to public interface""" |
766 | _execute("sudo ip addr add %s dev %s" % (floating_ip, |
767 | @@ -103,7 +105,7 @@ |
768 | _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT" |
769 | % (fixed_ip)) |
770 | for (protocol, port) in DEFAULT_PORTS: |
771 | - _confirm_rule("FORWARD","-d %s -p %s --dport %s -j ACCEPT" |
772 | + _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" |
773 | % (fixed_ip, protocol, port)) |
774 | |
775 | |
776 | @@ -189,7 +191,8 @@ |
777 | |
778 | # if dnsmasq is already running, then tell it to reload |
779 | if pid: |
780 | - out, _err = _execute('cat /proc/%d/cmdline' % pid, check_exit_code=False) |
781 | + out, _err = _execute('cat /proc/%d/cmdline' % pid, |
782 | + check_exit_code=False) |
783 | if conffile in out: |
784 | try: |
785 | _execute('sudo kill -HUP %d' % pid) |
786 | @@ -233,7 +236,8 @@ |
787 | """Delete and re-add iptables rule""" |
788 | if FLAGS.use_nova_chains: |
789 | chain = "nova_%s" % chain.lower() |
790 | - _execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False) |
791 | + _execute("sudo iptables --delete %s %s" % (chain, cmd), |
792 | + check_exit_code=False) |
793 | _execute("sudo iptables -I %s %s" % (chain, cmd)) |
794 | |
795 | |
796 | |
797 | === modified file 'nova/network/manager.py' |
798 | --- nova/network/manager.py 2010-10-14 23:44:58 +0000 |
799 | +++ nova/network/manager.py 2010-10-22 00:19:41 +0000 |
800 | @@ -49,7 +49,8 @@ |
801 | flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') |
802 | flags.DEFINE_integer('network_size', 256, |
803 | 'Number of addresses in each private subnet') |
804 | -flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') |
805 | +flags.DEFINE_string('floating_range', '4.4.4.0/24', |
806 | + 'Floating IP address block') |
807 | flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') |
808 | flags.DEFINE_integer('cnt_vpn_clients', 5, |
809 | 'Number of addresses reserved for vpn clients') |
810 | @@ -287,7 +288,6 @@ |
811 | self.db.network_update(context, network_id, net) |
812 | |
813 | |
814 | - |
815 | class FlatDHCPManager(NetworkManager): |
816 | """Flat networking with dhcp""" |
817 | |
818 | @@ -432,4 +432,3 @@ |
819 | """Number of reserved ips at the top of the range""" |
820 | parent_reserved = super(VlanManager, self)._top_reserved_ips |
821 | return parent_reserved + FLAGS.cnt_vpn_clients |
822 | - |
823 | |
824 | === modified file 'nova/objectstore/bucket.py' |
825 | --- nova/objectstore/bucket.py 2010-10-14 05:07:43 +0000 |
826 | +++ nova/objectstore/bucket.py 2010-10-22 00:19:41 +0000 |
827 | @@ -69,7 +69,8 @@ |
828 | """Create a new bucket owned by a project. |
829 | |
830 | @bucket_name: a string representing the name of the bucket to create |
831 | - @context: a nova.auth.api.ApiContext object representing who owns the bucket. |
832 | + @context: a nova.auth.api.ApiContext object representing who owns the |
833 | + bucket. |
834 | |
835 | Raises: |
836 | NotAuthorized: if the bucket is already exists or has invalid name |
837 | @@ -77,12 +78,12 @@ |
838 | path = os.path.abspath(os.path.join( |
839 | FLAGS.buckets_path, bucket_name)) |
840 | if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ |
841 | - os.path.exists(path): |
842 | - raise exception.NotAuthorized() |
843 | + os.path.exists(path): |
844 | + raise exception.NotAuthorized() |
845 | |
846 | os.makedirs(path) |
847 | |
848 | - with open(path+'.json', 'w') as f: |
849 | + with open(path + '.json', 'w') as f: |
850 | json.dump({'ownerId': context.project_id}, f) |
851 | |
852 | @property |
853 | @@ -99,22 +100,25 @@ |
854 | @property |
855 | def owner_id(self): |
856 | try: |
857 | - with open(self.path+'.json') as f: |
858 | + with open(self.path + '.json') as f: |
859 | return json.load(f)['ownerId'] |
860 | except: |
861 | return None |
862 | |
863 | def is_authorized(self, context): |
864 | try: |
865 | - return context.user.is_admin() or self.owner_id == context.project_id |
866 | + return context.user.is_admin() or \ |
867 | + self.owner_id == context.project_id |
868 | except Exception, e: |
869 | return False |
870 | |
871 | def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): |
872 | object_names = [] |
873 | + path_length = len(self.path) |
874 | for root, dirs, files in os.walk(self.path): |
875 | for file_name in files: |
876 | - object_names.append(os.path.join(root, file_name)[len(self.path)+1:]) |
877 | + object_name = os.path.join(root, file_name)[path_length + 1:] |
878 | + object_names.append(object_name) |
879 | object_names.sort() |
880 | contents = [] |
881 | |
882 | @@ -164,7 +168,7 @@ |
883 | if len(os.listdir(self.path)) > 0: |
884 | raise exception.NotEmpty() |
885 | os.rmdir(self.path) |
886 | - os.remove(self.path+'.json') |
887 | + os.remove(self.path + '.json') |
888 | |
889 | def __getitem__(self, key): |
890 | return stored.Object(self, key) |
891 | |
892 | === modified file 'nova/objectstore/handler.py' |
893 | --- nova/objectstore/handler.py 2010-10-19 23:57:24 +0000 |
894 | +++ nova/objectstore/handler.py 2010-10-22 00:19:41 +0000 |
895 | @@ -136,6 +136,7 @@ |
896 | logging.debug("Authentication Failure: %s", ex) |
897 | raise exception.NotAuthorized() |
898 | |
899 | + |
900 | class ErrorHandlingResource(resource.Resource): |
901 | """Maps exceptions to 404 / 401 codes. Won't work for |
902 | exceptions thrown after NOT_DONE_YET is returned. |
903 | @@ -162,7 +163,7 @@ |
904 | def __init__(self): |
905 | ErrorHandlingResource.__init__(self) |
906 | |
907 | - def getChild(self, name, request): # pylint: disable-msg=C0103 |
908 | + def getChild(self, name, request): # pylint: disable-msg=C0103 |
909 | """Returns either the image or bucket resource""" |
910 | request.context = get_context(request) |
911 | if name == '': |
912 | @@ -172,7 +173,7 @@ |
913 | else: |
914 | return BucketResource(name) |
915 | |
916 | - def render_GET(self, request): # pylint: disable-msg=R0201 |
917 | + def render_GET(self, request): # pylint: disable-msg=R0201 |
918 | """Renders the GET request for a list of buckets as XML""" |
919 | logging.debug('List of buckets requested') |
920 | buckets = [b for b in bucket.Bucket.all() \ |
921 | @@ -321,11 +322,13 @@ |
922 | if not self.img.is_authorized(request.context, True): |
923 | raise exception.NotAuthorized() |
924 | return static.File(self.img.image_path, |
925 | - defaultType='application/octet-stream' |
926 | - ).render_GET(request) |
927 | + defaultType='application/octet-stream').\ |
928 | + render_GET(request) |
929 | + |
930 | |
931 | class ImagesResource(resource.Resource): |
932 | """A web resource representing a list of images""" |
933 | + |
934 | def getChild(self, name, _request): |
935 | """Returns itself or an ImageResource if no name given""" |
936 | if name == '': |
937 | @@ -333,7 +336,7 @@ |
938 | else: |
939 | return ImageResource(name) |
940 | |
941 | - def render_GET(self, request): # pylint: disable-msg=R0201 |
942 | + def render_GET(self, request): # pylint: disable-msg=R0201 |
943 | """ returns a json listing of all images |
944 | that a user has permissions to see """ |
945 | |
946 | @@ -362,7 +365,7 @@ |
947 | request.finish() |
948 | return server.NOT_DONE_YET |
949 | |
950 | - def render_PUT(self, request): # pylint: disable-msg=R0201 |
951 | + def render_PUT(self, request): # pylint: disable-msg=R0201 |
952 | """ create a new registered image """ |
953 | |
954 | image_id = get_argument(request, 'image_id', u'') |
955 | @@ -383,7 +386,7 @@ |
956 | p.start() |
957 | return '' |
958 | |
959 | - def render_POST(self, request): # pylint: disable-msg=R0201 |
960 | + def render_POST(self, request): # pylint: disable-msg=R0201 |
961 | """Update image attributes: public/private""" |
962 | |
963 | # image_id required for all requests |
964 | @@ -397,7 +400,7 @@ |
965 | if operation: |
966 | # operation implies publicity toggle |
967 | logging.debug("handling publicity toggle") |
968 | - image_object.set_public(operation=='add') |
969 | + image_object.set_public(operation == 'add') |
970 | else: |
971 | # other attributes imply update |
972 | logging.debug("update user fields") |
973 | @@ -407,7 +410,7 @@ |
974 | image_object.update_user_editable_fields(clean_args) |
975 | return '' |
976 | |
977 | - def render_DELETE(self, request): # pylint: disable-msg=R0201 |
978 | + def render_DELETE(self, request): # pylint: disable-msg=R0201 |
979 | """Delete a registered image""" |
980 | image_id = get_argument(request, "image_id", u"") |
981 | image_object = image.Image(image_id) |
982 | |
983 | === modified file 'nova/objectstore/image.py' |
984 | --- nova/objectstore/image.py 2010-10-14 05:07:43 +0000 |
985 | +++ nova/objectstore/image.py 2010-10-22 00:19:41 +0000 |
986 | @@ -48,8 +48,8 @@ |
987 | self.image_id = image_id |
988 | self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id)) |
989 | if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \ |
990 | - not os.path.isdir(self.path): |
991 | - raise exception.NotFound |
992 | + not os.path.isdir(self.path): |
993 | + raise exception.NotFound |
994 | |
995 | @property |
996 | def image_path(self): |
997 | @@ -127,8 +127,8 @@ |
998 | a string of the image id for the kernel |
999 | |
1000 | @type ramdisk: bool or str |
1001 | - @param ramdisk: either TRUE meaning this partition is a ramdisk image or |
1002 | - a string of the image id for the ramdisk |
1003 | + @param ramdisk: either TRUE meaning this partition is a ramdisk image |
1004 | + or a string of the image id for the ramdisk |
1005 | |
1006 | |
1007 | @type public: bool |
1008 | @@ -160,8 +160,7 @@ |
1009 | 'isPublic': public, |
1010 | 'architecture': 'x86_64', |
1011 | 'imageType': image_type, |
1012 | - 'state': 'available' |
1013 | - } |
1014 | + 'state': 'available'} |
1015 | |
1016 | if type(kernel) is str and len(kernel) > 0: |
1017 | info['kernelId'] = kernel |
1018 | @@ -180,7 +179,7 @@ |
1019 | os.makedirs(image_path) |
1020 | |
1021 | bucket_name = image_location.split("/")[0] |
1022 | - manifest_path = image_location[len(bucket_name)+1:] |
1023 | + manifest_path = image_location[len(bucket_name) + 1:] |
1024 | bucket_object = bucket.Bucket(bucket_name) |
1025 | |
1026 | manifest = ElementTree.fromstring(bucket_object[manifest_path].read()) |
1027 | @@ -204,10 +203,9 @@ |
1028 | 'imageId': image_id, |
1029 | 'imageLocation': image_location, |
1030 | 'imageOwnerId': context.project_id, |
1031 | - 'isPublic': False, # FIXME: grab public from manifest |
1032 | - 'architecture': 'x86_64', # FIXME: grab architecture from manifest |
1033 | - 'imageType' : image_type |
1034 | - } |
1035 | + 'isPublic': False, # FIXME: grab public from manifest |
1036 | + 'architecture': 'x86_64', # FIXME: grab architecture from manifest |
1037 | + 'imageType': image_type} |
1038 | |
1039 | if kernel_id: |
1040 | info['kernelId'] = kernel_id |
1041 | @@ -230,24 +228,29 @@ |
1042 | write_state('decrypting') |
1043 | |
1044 | # FIXME: grab kernelId and ramdiskId from bundle manifest |
1045 | - encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text) |
1046 | - encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text) |
1047 | + hex_key = manifest.find("image/ec2_encrypted_key").text |
1048 | + encrypted_key = binascii.a2b_hex(hex_key) |
1049 | + hex_iv = manifest.find("image/ec2_encrypted_iv").text |
1050 | + encrypted_iv = binascii.a2b_hex(hex_iv) |
1051 | cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem") |
1052 | |
1053 | decrypted_filename = os.path.join(image_path, 'image.tar.gz') |
1054 | - Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename) |
1055 | + Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, |
1056 | + cloud_private_key, decrypted_filename) |
1057 | |
1058 | write_state('untarring') |
1059 | |
1060 | image_file = Image.untarzip_image(image_path, decrypted_filename) |
1061 | - shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image')) |
1062 | + shutil.move(os.path.join(image_path, image_file), |
1063 | + os.path.join(image_path, 'image')) |
1064 | |
1065 | write_state('available') |
1066 | os.unlink(decrypted_filename) |
1067 | os.unlink(encrypted_filename) |
1068 | |
1069 | @staticmethod |
1070 | - def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): |
1071 | + def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, |
1072 | + cloud_private_key, decrypted_filename): |
1073 | key, err = utils.execute( |
1074 | 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, |
1075 | process_input=encrypted_key, |
1076 | @@ -259,13 +262,15 @@ |
1077 | process_input=encrypted_iv, |
1078 | check_exit_code=False) |
1079 | if err: |
1080 | - raise exception.Error("Failed to decrypt initialization vector: %s" % err) |
1081 | + raise exception.Error("Failed to decrypt initialization " |
1082 | + "vector: %s" % err) |
1083 | _out, err = utils.execute( |
1084 | 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' |
1085 | % (encrypted_filename, key, iv, decrypted_filename), |
1086 | check_exit_code=False) |
1087 | if err: |
1088 | - raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) |
1089 | + raise exception.Error("Failed to decrypt image file %s : %s" % |
1090 | + (encrypted_filename, err)) |
1091 | |
1092 | @staticmethod |
1093 | def untarzip_image(path, filename): |
1094 | |
1095 | === modified file 'nova/objectstore/stored.py' |
1096 | --- nova/objectstore/stored.py 2010-08-16 12:16:21 +0000 |
1097 | +++ nova/objectstore/stored.py 2010-10-22 00:19:41 +0000 |
1098 | @@ -50,8 +50,8 @@ |
1099 | return os.path.getmtime(self.path) |
1100 | |
1101 | def read(self): |
1102 | - """ read all contents of key into memory and return """ |
1103 | - return self.file.read() |
1104 | + """ read all contents of key into memory and return """ |
1105 | + return self.file.read() |
1106 | |
1107 | @property |
1108 | def file(self): |
1109 | |
1110 | === modified file 'nova/scheduler/driver.py' |
1111 | --- nova/scheduler/driver.py 2010-09-23 09:24:54 +0000 |
1112 | +++ nova/scheduler/driver.py 2010-10-22 00:19:41 +0000 |
1113 | @@ -31,10 +31,12 @@ |
1114 | flags.DEFINE_integer('service_down_time', 60, |
1115 | 'maximum time since last checkin for up service') |
1116 | |
1117 | + |
1118 | class NoValidHost(exception.Error): |
1119 | """There is no valid host for the command.""" |
1120 | pass |
1121 | |
1122 | + |
1123 | class Scheduler(object): |
1124 | """The base class that all Scheduler clases should inherit from.""" |
1125 | |
1126 | |
1127 | === modified file 'nova/scheduler/manager.py' |
1128 | --- nova/scheduler/manager.py 2010-10-14 05:05:21 +0000 |
1129 | +++ nova/scheduler/manager.py 2010-10-22 00:19:41 +0000 |
1130 | @@ -56,7 +56,8 @@ |
1131 | driver_method = 'schedule_%s' % method |
1132 | elevated = context.elevated() |
1133 | try: |
1134 | - host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) |
1135 | + host = getattr(self.driver, driver_method)(elevated, *args, |
1136 | + **kwargs) |
1137 | except AttributeError: |
1138 | host = self.driver.schedule(elevated, topic, *args, **kwargs) |
1139 | |
1140 | |
1141 | === modified file 'nova/scheduler/simple.py' |
1142 | --- nova/scheduler/simple.py 2010-09-12 03:00:56 +0000 |
1143 | +++ nova/scheduler/simple.py 2010-10-22 00:19:41 +0000 |
1144 | @@ -36,6 +36,7 @@ |
1145 | flags.DEFINE_integer("max_networks", 1000, |
1146 | "maximum number of networks to allow per host") |
1147 | |
1148 | + |
1149 | class SimpleScheduler(chance.ChanceScheduler): |
1150 | """Implements Naive Scheduler that tries to find least loaded host.""" |
1151 | |
1152 | |
1153 | === modified file 'nova/virt/fake.py' |
1154 | --- nova/virt/fake.py 2010-09-20 09:46:18 +0000 |
1155 | +++ nova/virt/fake.py 2010-10-22 00:19:41 +0000 |
1156 | @@ -226,6 +226,7 @@ |
1157 | def get_console_output(self, instance): |
1158 | return 'FAKE CONSOLE OUTPUT' |
1159 | |
1160 | + |
1161 | class FakeInstance(object): |
1162 | def __init__(self): |
1163 | self._state = power_state.NOSTATE |
1164 | |
1165 | === modified file 'nova/virt/images.py' |
1166 | --- nova/virt/images.py 2010-10-07 14:03:43 +0000 |
1167 | +++ nova/virt/images.py 2010-10-22 00:19:41 +0000 |
1168 | @@ -62,8 +62,8 @@ |
1169 | headers['Authorization'] = 'AWS %s:%s' % (access, signature) |
1170 | |
1171 | cmd = ['/usr/bin/curl', '--fail', '--silent', url] |
1172 | - for (k,v) in headers.iteritems(): |
1173 | - cmd += ['-H', '%s: %s' % (k,v)] |
1174 | + for (k, v) in headers.iteritems(): |
1175 | + cmd += ['-H', '%s: %s' % (k, v)] |
1176 | |
1177 | cmd += ['-o', path] |
1178 | return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) |
1179 | |
1180 | === modified file 'nova/virt/libvirt_conn.py' |
1181 | --- nova/virt/libvirt_conn.py 2010-10-18 20:32:45 +0000 |
1182 | +++ nova/virt/libvirt_conn.py 2010-10-22 00:19:41 +0000 |
1183 | @@ -62,7 +62,8 @@ |
1184 | 'Template file for injected network') |
1185 | flags.DEFINE_string('libvirt_type', |
1186 | 'kvm', |
1187 | - 'Libvirt domain type (valid options are: kvm, qemu, uml, xen)') |
1188 | + 'Libvirt domain type (valid options are: ' |
1189 | + 'kvm, qemu, uml, xen)') |
1190 | flags.DEFINE_string('libvirt_uri', |
1191 | '', |
1192 | 'Override the default libvirt URI (which is dependent' |
1193 | @@ -96,7 +97,8 @@ |
1194 | def _conn(self): |
1195 | if not self._wrapped_conn or not self._test_connection(): |
1196 | logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) |
1197 | - self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) |
1198 | + self._wrapped_conn = self._connect(self.libvirt_uri, |
1199 | + self.read_only) |
1200 | return self._wrapped_conn |
1201 | |
1202 | def _test_connection(self): |
1203 | @@ -150,6 +152,7 @@ |
1204 | # WE'LL save this for when we do shutdown, |
1205 | # instead of destroy - but destroy returns immediately |
1206 | timer = task.LoopingCall(f=None) |
1207 | + |
1208 | def _wait_for_shutdown(): |
1209 | try: |
1210 | state = self.get_info(instance['name'])['state'] |
1211 | @@ -164,6 +167,7 @@ |
1212 | power_state.SHUTDOWN) |
1213 | timer.stop() |
1214 | d.callback(None) |
1215 | + |
1216 | timer.f = _wait_for_shutdown |
1217 | timer.start(interval=0.5, now=True) |
1218 | return d |
1219 | @@ -201,6 +205,7 @@ |
1220 | |
1221 | d = defer.Deferred() |
1222 | timer = task.LoopingCall(f=None) |
1223 | + |
1224 | def _wait_for_reboot(): |
1225 | try: |
1226 | state = self.get_info(instance['name'])['state'] |
1227 | @@ -217,6 +222,7 @@ |
1228 | power_state.SHUTDOWN) |
1229 | timer.stop() |
1230 | d.callback(None) |
1231 | + |
1232 | timer.f = _wait_for_reboot |
1233 | timer.start(interval=0.5, now=True) |
1234 | yield d |
1235 | @@ -229,7 +235,8 @@ |
1236 | instance['id'], |
1237 | power_state.NOSTATE, |
1238 | 'launching') |
1239 | - yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) |
1240 | + yield NWFilterFirewall(self._conn).\ |
1241 | + setup_nwfilters_for_instance(instance) |
1242 | yield self._create_image(instance, xml) |
1243 | yield self._conn.createXML(xml, 0) |
1244 | # TODO(termie): this should actually register |
1245 | @@ -238,6 +245,7 @@ |
1246 | |
1247 | local_d = defer.Deferred() |
1248 | timer = task.LoopingCall(f=None) |
1249 | + |
1250 | def _wait_for_boot(): |
1251 | try: |
1252 | state = self.get_info(instance['name'])['state'] |
1253 | @@ -265,8 +273,9 @@ |
1254 | |
1255 | if virsh_output.startswith('/dev/'): |
1256 | logging.info('cool, it\'s a device') |
1257 | - d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) |
1258 | - d.addCallback(lambda r:r[0]) |
1259 | + d = process.simple_execute("sudo dd if=%s iflag=nonblock" % |
1260 | + virsh_output, check_exit_code=False) |
1261 | + d.addCallback(lambda r: r[0]) |
1262 | return d |
1263 | else: |
1264 | return '' |
1265 | @@ -285,11 +294,15 @@ |
1266 | |
1267 | @exception.wrap_exception |
1268 | def get_console_output(self, instance): |
1269 | - console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') |
1270 | - d = process.simple_execute('sudo chown %d %s' % (os.getuid(), console_log)) |
1271 | + console_log = os.path.join(FLAGS.instances_path, instance['name'], |
1272 | + 'console.log') |
1273 | + d = process.simple_execute('sudo chown %d %s' % (os.getuid(), |
1274 | + console_log)) |
1275 | if FLAGS.libvirt_type == 'xen': |
1276 | # Xen is spethial |
1277 | - d.addCallback(lambda _: process.simple_execute("virsh ttyconsole %s" % instance['name'])) |
1278 | + d.addCallback(lambda _: |
1279 | + process.simple_execute("virsh ttyconsole %s" % |
1280 | + instance['name'])) |
1281 | d.addCallback(self._flush_xen_console) |
1282 | d.addCallback(self._append_to_file, console_log) |
1283 | else: |
1284 | @@ -297,7 +310,6 @@ |
1285 | d.addCallback(self._dump_file) |
1286 | return d |
1287 | |
1288 | - |
1289 | @defer.inlineCallbacks |
1290 | def _create_image(self, inst, libvirt_xml): |
1291 | # syntactic nicety |
1292 | @@ -309,7 +321,6 @@ |
1293 | yield process.simple_execute('mkdir -p %s' % basepath()) |
1294 | yield process.simple_execute('chmod 0777 %s' % basepath()) |
1295 | |
1296 | - |
1297 | # TODO(termie): these are blocking calls, it would be great |
1298 | # if they weren't. |
1299 | logging.info('instance %s: Creating image', inst['name']) |
1300 | @@ -317,17 +328,21 @@ |
1301 | f.write(libvirt_xml) |
1302 | f.close() |
1303 | |
1304 | - os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660)) |
1305 | + os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, |
1306 | + 0660)) |
1307 | |
1308 | user = manager.AuthManager().get_user(inst['user_id']) |
1309 | project = manager.AuthManager().get_project(inst['project_id']) |
1310 | |
1311 | if not os.path.exists(basepath('disk')): |
1312 | - yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) |
1313 | + yield images.fetch(inst.image_id, basepath('disk-raw'), user, |
1314 | + project) |
1315 | if not os.path.exists(basepath('kernel')): |
1316 | - yield images.fetch(inst.kernel_id, basepath('kernel'), user, project) |
1317 | + yield images.fetch(inst.kernel_id, basepath('kernel'), user, |
1318 | + project) |
1319 | if not os.path.exists(basepath('ramdisk')): |
1320 | - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) |
1321 | + yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, |
1322 | + project) |
1323 | |
1324 | execute = lambda cmd, process_input=None, check_exit_code=True: \ |
1325 | process.simple_execute(cmd=cmd, |
1326 | @@ -339,8 +354,8 @@ |
1327 | network_ref = db.network_get_by_instance(context.get_admin_context(), |
1328 | inst['id']) |
1329 | if network_ref['injected']: |
1330 | - address = db.instance_get_fixed_address(context.get_admin_context(), |
1331 | - inst['id']) |
1332 | + admin_context = context.get_admin_context() |
1333 | + address = db.instance_get_fixed_address(admin_context, inst['id']) |
1334 | with open(FLAGS.injected_network_template) as f: |
1335 | net = f.read() % {'address': address, |
1336 | 'netmask': network_ref['netmask'], |
1337 | @@ -354,7 +369,8 @@ |
1338 | if net: |
1339 | logging.info('instance %s: injecting net into image %s', |
1340 | inst['name'], inst.image_id) |
1341 | - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) |
1342 | + yield disk.inject_data(basepath('disk-raw'), key, net, |
1343 | + execute=execute) |
1344 | |
1345 | if os.path.exists(basepath('disk')): |
1346 | yield process.simple_execute('rm -f %s' % basepath('disk')) |
1347 | @@ -377,7 +393,8 @@ |
1348 | network = db.project_get_network(context.get_admin_context(), |
1349 | instance['project_id']) |
1350 | # FIXME(vish): stick this in db |
1351 | - instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']] |
1352 | + instance_type = instance['instance_type'] |
1353 | + instance_type = instance_types.INSTANCE_TYPES[instance_type] |
1354 | ip_address = db.instance_get_fixed_address(context.get_admin_context(), |
1355 | instance['id']) |
1356 | # Assume that the gateway also acts as the dhcp server. |
1357 | @@ -391,7 +408,7 @@ |
1358 | 'bridge_name': network['bridge'], |
1359 | 'mac_address': instance['mac_address'], |
1360 | 'ip_address': ip_address, |
1361 | - 'dhcp_server': dhcp_server } |
1362 | + 'dhcp_server': dhcp_server} |
1363 | libvirt_xml = self.libvirt_xml % xml_info |
1364 | logging.debug('instance %s: finished toXML method', instance['name']) |
1365 | |
1366 | @@ -506,7 +523,6 @@ |
1367 | domain = self._conn.lookupByName(instance_name) |
1368 | return domain.interfaceStats(interface) |
1369 | |
1370 | - |
1371 | def refresh_security_group(self, security_group_id): |
1372 | fw = NWFilterFirewall(self._conn) |
1373 | fw.ensure_security_group_filter(security_group_id) |
1374 | @@ -557,7 +573,6 @@ |
1375 | def __init__(self, get_connection): |
1376 | self._conn = get_connection |
1377 | |
1378 | - |
1379 | nova_base_filter = '''<filter name='nova-base' chain='root'> |
1380 | <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid> |
1381 | <filterref filter='no-mac-spoofing'/> |
1382 | @@ -578,7 +593,8 @@ |
1383 | srcportstart='68' |
1384 | dstportstart='67'/> |
1385 | </rule> |
1386 | - <rule action='accept' direction='in' priority='100'> |
1387 | + <rule action='accept' direction='in' |
1388 | + priority='100'> |
1389 | <udp srcipaddr='$DHCPSERVER' |
1390 | srcportstart='67' |
1391 | dstportstart='68'/> |
1392 | @@ -588,8 +604,8 @@ |
1393 | def nova_base_ipv4_filter(self): |
1394 | retval = "<filter name='nova-base-ipv4' chain='ipv4'>" |
1395 | for protocol in ['tcp', 'udp', 'icmp']: |
1396 | - for direction,action,priority in [('out','accept', 399), |
1397 | - ('inout','drop', 400)]: |
1398 | + for direction, action, priority in [('out', 'accept', 399), |
1399 | + ('inout', 'drop', 400)]: |
1400 | retval += """<rule action='%s' direction='%s' priority='%d'> |
1401 | <%s /> |
1402 | </rule>""" % (action, direction, |
1403 | @@ -597,12 +613,11 @@ |
1404 | retval += '</filter>' |
1405 | return retval |
1406 | |
1407 | - |
1408 | def nova_base_ipv6_filter(self): |
1409 | retval = "<filter name='nova-base-ipv6' chain='ipv6'>" |
1410 | for protocol in ['tcp', 'udp', 'icmp']: |
1411 | - for direction,action,priority in [('out','accept',399), |
1412 | - ('inout','drop',400)]: |
1413 | + for direction, action, priority in [('out', 'accept', 399), |
1414 | + ('inout', 'drop', 400)]: |
1415 | retval += """<rule action='%s' direction='%s' priority='%d'> |
1416 | <%s-ipv6 /> |
1417 | </rule>""" % (action, direction, |
1418 | @@ -610,7 +625,6 @@ |
1419 | retval += '</filter>' |
1420 | return retval |
1421 | |
1422 | - |
1423 | def nova_project_filter(self, project, net, mask): |
1424 | retval = "<filter name='nova-project-%s' chain='ipv4'>" % project |
1425 | for protocol in ['tcp', 'udp', 'icmp']: |
1426 | @@ -620,14 +634,12 @@ |
1427 | retval += '</filter>' |
1428 | return retval |
1429 | |
1430 | - |
1431 | def _define_filter(self, xml): |
1432 | if callable(xml): |
1433 | xml = xml() |
1434 | d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) |
1435 | return d |
1436 | |
1437 | - |
1438 | @staticmethod |
1439 | def _get_net_and_mask(cidr): |
1440 | net = IPy.IP(cidr) |
1441 | @@ -646,9 +658,9 @@ |
1442 | yield self._define_filter(self.nova_dhcp_filter) |
1443 | yield self._define_filter(self.nova_base_filter) |
1444 | |
1445 | - nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" + |
1446 | - " <filterref filter='nova-base' />\n" |
1447 | - ) % instance['name'] |
1448 | + nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \ |
1449 | + " <filterref filter='nova-base' />\n" % \ |
1450 | + instance['name'] |
1451 | |
1452 | if FLAGS.allow_project_net_traffic: |
1453 | network_ref = db.project_get_network(context.get_admin_context(), |
1454 | @@ -658,14 +670,14 @@ |
1455 | net, mask) |
1456 | yield self._define_filter(project_filter) |
1457 | |
1458 | - nwfilter_xml += (" <filterref filter='nova-project-%s' />\n" |
1459 | - ) % instance['project_id'] |
1460 | + nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \ |
1461 | + instance['project_id'] |
1462 | |
1463 | for security_group in instance.security_groups: |
1464 | yield self.ensure_security_group_filter(security_group['id']) |
1465 | |
1466 | - nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n" |
1467 | - ) % security_group['id'] |
1468 | + nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \ |
1469 | + security_group['id'] |
1470 | nwfilter_xml += "</filter>" |
1471 | |
1472 | yield self._define_filter(nwfilter_xml) |
1473 | @@ -675,7 +687,6 @@ |
1474 | return self._define_filter( |
1475 | self.security_group_to_nwfilter_xml(security_group_id)) |
1476 | |
1477 | - |
1478 | def security_group_to_nwfilter_xml(self, security_group_id): |
1479 | security_group = db.security_group_get(context.get_admin_context(), |
1480 | security_group_id) |
1481 | @@ -684,12 +695,15 @@ |
1482 | rule_xml += "<rule action='accept' direction='in' priority='300'>" |
1483 | if rule.cidr: |
1484 | net, mask = self._get_net_and_mask(rule.cidr) |
1485 | - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask) |
1486 | + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
1487 | + (rule.protocol, net, mask) |
1488 | if rule.protocol in ['tcp', 'udp']: |
1489 | rule_xml += "dstportstart='%s' dstportend='%s' " % \ |
1490 | (rule.from_port, rule.to_port) |
1491 | elif rule.protocol == 'icmp': |
1492 | - logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port)) |
1493 | + logging.info('rule.protocol: %r, rule.from_port: %r, ' |
1494 | + 'rule.to_port: %r' % |
1495 | + (rule.protocol, rule.from_port, rule.to_port)) |
1496 | if rule.from_port != -1: |
1497 | rule_xml += "type='%s' " % rule.from_port |
1498 | if rule.to_port != -1: |
1499 | @@ -697,5 +711,6 @@ |
1500 | |
1501 | rule_xml += '/>\n' |
1502 | rule_xml += "</rule>\n" |
1503 | - xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,) |
1504 | + xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \ |
1505 | + (security_group_id, rule_xml,) |
1506 | return xml |
1507 | |
1508 | === modified file 'nova/virt/xenapi.py' |
1509 | --- nova/virt/xenapi.py 2010-10-04 20:32:00 +0000 |
1510 | +++ nova/virt/xenapi.py 2010-10-22 00:19:41 +0000 |
1511 | @@ -75,12 +75,11 @@ |
1512 | |
1513 | |
1514 | XENAPI_POWER_STATE = { |
1515 | - 'Halted' : power_state.SHUTDOWN, |
1516 | - 'Running' : power_state.RUNNING, |
1517 | - 'Paused' : power_state.PAUSED, |
1518 | - 'Suspended': power_state.SHUTDOWN, # FIXME |
1519 | - 'Crashed' : power_state.CRASHED |
1520 | -} |
1521 | + 'Halted': power_state.SHUTDOWN, |
1522 | + 'Running': power_state.RUNNING, |
1523 | + 'Paused': power_state.PAUSED, |
1524 | + 'Suspended': power_state.SHUTDOWN, # FIXME |
1525 | + 'Crashed': power_state.CRASHED} |
1526 | |
1527 | |
1528 | def get_connection(_): |
1529 | @@ -90,12 +89,15 @@ |
1530 | # library when not using XenAPI. |
1531 | global XenAPI |
1532 | if XenAPI is None: |
1533 | - XenAPI = __import__('XenAPI') |
1534 | + XenAPI = __import__('XenAPI') |
1535 | url = FLAGS.xenapi_connection_url |
1536 | username = FLAGS.xenapi_connection_username |
1537 | password = FLAGS.xenapi_connection_password |
1538 | if not url or password is None: |
1539 | - raise Exception('Must specify xenapi_connection_url, xenapi_connection_username (optionally), and xenapi_connection_password to use connection_type=xenapi') |
1540 | + raise Exception('Must specify xenapi_connection_url, ' |
1541 | + 'xenapi_connection_username (optionally), and ' |
1542 | + 'xenapi_connection_password to use ' |
1543 | + 'connection_type=xenapi') |
1544 | return XenAPIConnection(url, username, password) |
1545 | |
1546 | |
1547 | @@ -141,7 +143,7 @@ |
1548 | def _create_vm(self, instance, kernel, ramdisk): |
1549 | """Create a VM record. Returns a Deferred that gives the new |
1550 | VM reference.""" |
1551 | - |
1552 | + |
1553 | instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] |
1554 | mem = str(long(instance_type['memory_mb']) * 1024 * 1024) |
1555 | vcpus = str(instance_type['vcpus']) |
1556 | @@ -183,7 +185,7 @@ |
1557 | def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): |
1558 | """Create a VBD record. Returns a Deferred that gives the new |
1559 | VBD reference.""" |
1560 | - |
1561 | + |
1562 | vbd_rec = {} |
1563 | vbd_rec['VM'] = vm_ref |
1564 | vbd_rec['VDI'] = vdi_ref |
1565 | @@ -207,10 +209,10 @@ |
1566 | def _create_vif(self, vm_ref, network_ref, mac_address): |
1567 | """Create a VIF record. Returns a Deferred that gives the new |
1568 | VIF reference.""" |
1569 | - |
1570 | + |
1571 | vif_rec = {} |
1572 | vif_rec['device'] = '0' |
1573 | - vif_rec['network']= network_ref |
1574 | + vif_rec['network'] = network_ref |
1575 | vif_rec['VM'] = vm_ref |
1576 | vif_rec['MAC'] = mac_address |
1577 | vif_rec['MTU'] = '1500' |
1578 | @@ -303,7 +305,7 @@ |
1579 | |
1580 | def _lookup_blocking(self, i): |
1581 | vms = self._conn.xenapi.VM.get_by_name_label(i) |
1582 | - n = len(vms) |
1583 | + n = len(vms) |
1584 | if n == 0: |
1585 | return None |
1586 | elif n > 1: |
1587 | |
1588 | === modified file 'nova/volume/driver.py' |
1589 | --- nova/volume/driver.py 2010-09-12 15:16:59 +0000 |
1590 | +++ nova/volume/driver.py 2010-10-22 00:19:41 +0000 |
1591 | @@ -61,7 +61,6 @@ |
1592 | "Try number %s", tries) |
1593 | yield self._execute("sleep %s" % tries ** 2) |
1594 | |
1595 | - |
1596 | @defer.inlineCallbacks |
1597 | def create_volume(self, volume_name, size): |
1598 | """Creates a logical volume""" |
Looks good to me (LGTM) — approving.