Merge lp:~jk0/nova/improve-pylint-scores into lp:~hudson-openstack/nova/trunk

Proposed by Josh Kearney
Status: Merged
Approved by: Vish Ishaya
Approved revision: no longer in the source branch.
Merged at revision: 1005
Proposed branch: lp:~jk0/nova/improve-pylint-scores
Merge into: lp:~hudson-openstack/nova/trunk
Diff against target: 156 lines (+10/-19)
4 files modified
nova/compute/manager.py (+3/-5)
nova/virt/xenapi/fake.py (+1/-1)
nova/virt/xenapi/vm_utils.py (+2/-7)
nova/virt/xenapi/vmops.py (+4/-6)
To merge this branch: bzr merge lp:~jk0/nova/improve-pylint-scores
Reviewer Review Type Date Requested Status
Vish Ishaya (community) Approve
Jay Pipes (community) Approve
Review via email: mp+58318@code.launchpad.net

Description of the change

Round 1 of pylint cleanup.

Revision history for this message
Jay Pipes (jaypipes) wrote :

Interesting that so many unused db API calls could be removed from the compute manager... lgtm.

review: Approve
lp:~jk0/nova/improve-pylint-scores updated
1004. By Dan Prince

Implement quotas for the new v1.1 server metadata controller.

Created a new _check_metadata_properties_quota method in the compute API that is used when creating instances and when updating server metadata. In doing so, I modified the compute API so that metadata is a dict (not an array); this ensures metadata keys are unique (which the API specs imply) and fits JSON request formats more naturally anyway.
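
A rough sketch of what such a check might look like; the limits, exception class, and function body are illustrative assumptions, not the actual Nova implementation (the real method is a compute API method, also takes a request context, and uses Nova's own quota and exception machinery):

    # Illustrative sketch only, not the code merged in revision 1004.
    MAX_METADATA_ITEMS = 128     # assumed per-instance item quota
    MAX_METADATA_LENGTH = 255    # assumed per-key/value length limit

    class QuotaError(Exception):
        """Raised when a metadata quota or size limit is exceeded."""
        pass

    def _check_metadata_properties_quota(metadata=None):
        """Enforce quota and size limits on instance metadata properties."""
        metadata = metadata or {}
        if len(metadata) > MAX_METADATA_ITEMS:
            raise QuotaError("Too many metadata items for this instance")
        for key, value in metadata.items():
            # metadata is a dict, so keys are guaranteed unique.
            if len(key) > MAX_METADATA_LENGTH or len(value) > MAX_METADATA_LENGTH:
                raise QuotaError("Metadata key or value exceeds %d characters"
                                 % MAX_METADATA_LENGTH)

Per the description, the compute API would call such a helper both when creating instances and when updating server metadata.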

Additionally this branch enables and fixes the integration test to create servers with metadata.

Revision history for this message
Vish Ishaya (vishvananda) wrote :

16 - service = self.db.service_get_by_host_and_topic(context,
17 - migration_ref['dest_compute'], FLAGS.compute_topic)

This seems intended to check if the service exists before sending a message to it. Perhaps you should leave it in with a note (and perhaps _service =)
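
For illustration, the suggested pattern in a minimal, self-contained form (the dictionary lookup here is a stand-in for the db service query, not the actual Nova code):

    # Keep a lookup that exists only as an existence check, and bind its result
    # to an underscore-prefixed name to mark it as intentionally unused; pylint's
    # dummy-variables-rgx setting can be configured to skip such names when
    # reporting unused variables (W0612).
    def ensure_service_registered(services, host):
        # Raises KeyError if the host has no service; the value itself is unused.
        _service = services[host]
        return True

    ensure_service_registered({'compute-1': 'nova-compute'}, 'compute-1')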

Otherwise LGTM

review: Needs Information
Revision history for this message
Josh Kearney (jk0) wrote :

> 16 - service = self.db.service_get_by_host_and_topic(context,
> 17 - migration_ref['dest_compute'], FLAGS.compute_topic)
>
> This seems intended to check if the service exists before sending a message to
> it. Perhaps you should leave it in with a note (and perhaps _service =)
>
> Otherwise LGTM

Done, thanks!

Revision history for this message
Vish Ishaya (vishvananda) wrote :

cool lgtm

review: Approve
lp:~jk0/nova/improve-pylint-scores updated
1005. By Josh Kearney

Round 1 of pylint cleanup.

Preview Diff

1=== modified file 'nova/compute/manager.py'
2--- nova/compute/manager.py 2011-04-12 16:21:29 +0000
3+++ nova/compute/manager.py 2011-04-19 18:31:35 +0000
4@@ -434,7 +434,6 @@
5 """Destroys the source instance"""
6 context = context.elevated()
7 instance_ref = self.db.instance_get(context, instance_id)
8- migration_ref = self.db.migration_get(context, migration_id)
9 self.driver.destroy(instance_ref)
10
11 @exception.wrap_exception
12@@ -525,8 +524,9 @@
13 self.db.migration_update(context, migration_id,
14 {'status': 'post-migrating', })
15
16- service = self.db.service_get_by_host_and_topic(context,
17- migration_ref['dest_compute'], FLAGS.compute_topic)
18+ # Make sure the service exists before sending a message.
19+ _service = self.db.service_get_by_host_and_topic(context,
20+ migration_ref['dest_compute'], FLAGS.compute_topic)
21 topic = self.db.queue_get_for(context, FLAGS.compute_topic,
22 migration_ref['dest_compute'])
23 rpc.cast(context, topic,
24@@ -652,7 +652,6 @@
25
26 """
27 context = context.elevated()
28- instance_ref = self.db.instance_get(context, instance_id)
29
30 LOG.debug(_('instance %s: locking'), instance_id, context=context)
31 self.db.instance_update(context, instance_id, {'locked': True})
32@@ -664,7 +663,6 @@
33
34 """
35 context = context.elevated()
36- instance_ref = self.db.instance_get(context, instance_id)
37
38 LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
39 self.db.instance_update(context, instance_id, {'locked': False})
40
41=== modified file 'nova/virt/xenapi/fake.py'
42--- nova/virt/xenapi/fake.py 2011-03-28 21:00:17 +0000
43+++ nova/virt/xenapi/fake.py 2011-04-19 18:31:35 +0000
44@@ -294,7 +294,7 @@
45 def __str__(self):
46 try:
47 return str(self.details)
48- except Exception, exc:
49+ except Exception:
50 return "XenAPI Fake Failure: %s" % str(self.details)
51
52 def _details_map(self):
53
54=== modified file 'nova/virt/xenapi/vm_utils.py'
55--- nova/virt/xenapi/vm_utils.py 2011-04-08 18:21:36 +0000
56+++ nova/virt/xenapi/vm_utils.py 2011-04-19 18:31:35 +0000
57@@ -28,10 +28,7 @@
58 import uuid
59 from xml.dom import minidom
60
61-from eventlet import event
62 import glance.client
63-from nova import context
64-from nova import db
65 from nova import exception
66 from nova import flags
67 from nova import log as logging
68@@ -306,7 +303,6 @@
69 % locals())
70
71 vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
72- vm_vdi_uuid = vm_vdi_rec["uuid"]
73 sr_ref = vm_vdi_rec["SR"]
74
75 original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
76@@ -755,14 +751,14 @@
77 session.call_xenapi('SR.scan', sr_ref)
78
79
80-def get_rrd(host, uuid):
81+def get_rrd(host, vm_uuid):
82 """Return the VM RRD XML as a string"""
83 try:
84 xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
85 FLAGS.xenapi_connection_username,
86 FLAGS.xenapi_connection_password,
87 host,
88- uuid))
89+ vm_uuid))
90 return xml.read()
91 except IOError:
92 return None
93@@ -1020,7 +1016,6 @@
94
95 def _write_partition(virtual_size, dev):
96 dest = '/dev/%s' % dev
97- mbr_last = MBR_SIZE_SECTORS - 1
98 primary_first = MBR_SIZE_SECTORS
99 primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
100
101
102=== modified file 'nova/virt/xenapi/vmops.py'
103--- nova/virt/xenapi/vmops.py 2011-04-07 21:08:16 +0000
104+++ nova/virt/xenapi/vmops.py 2011-04-19 18:31:35 +0000
105@@ -387,7 +387,6 @@
106
107 def link_disks(self, instance, base_copy_uuid, cow_uuid):
108 """Links the base copy VHD to the COW via the XAPI plugin."""
109- vm_ref = VMHelper.lookup(self._session, instance.name)
110 new_base_copy_uuid = str(uuid.uuid4())
111 new_cow_uuid = str(uuid.uuid4())
112 params = {'instance_id': instance.id,
113@@ -760,7 +759,6 @@
114 instance)))
115
116 for vm in rescue_vms:
117- rescue_name = vm["name"]
118 rescue_vm_ref = vm["vm_ref"]
119
120 self._destroy_rescue_instance(rescue_vm_ref)
121@@ -798,7 +796,7 @@
122 def _get_network_info(self, instance):
123 """Creates network info list for instance."""
124 admin_context = context.get_admin_context()
125- IPs = db.fixed_ip_get_all_by_instance(admin_context,
126+ ips = db.fixed_ip_get_all_by_instance(admin_context,
127 instance['id'])
128 networks = db.network_get_all_by_instance(admin_context,
129 instance['id'])
130@@ -808,7 +806,7 @@
131
132 network_info = []
133 for network in networks:
134- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
135+ network_ips = [ip for ip in ips if ip.network_id == network.id]
136
137 def ip_dict(ip):
138 return {
139@@ -830,7 +828,7 @@
140 'mac': instance.mac_address,
141 'rxtx_cap': inst_type['rxtx_cap'],
142 'dns': [network['dns']],
143- 'ips': [ip_dict(ip) for ip in network_IPs]}
144+ 'ips': [ip_dict(ip) for ip in network_ips]}
145 if network['cidr_v6']:
146 info['ip6s'] = [ip6_dict()]
147 if network['gateway_v6']:
148@@ -923,7 +921,7 @@
149 try:
150 ret = self._make_xenstore_call('read_record', vm, path,
151 {'ignore_missing_path': 'True'})
152- except self.XenAPI.Failure, e:
153+ except self.XenAPI.Failure:
154 return None
155 ret = json.loads(ret)
156 if ret == "None":