Merge lp:~soren/nova/lp661262 into lp:~hudson-openstack/nova/trunk

Proposed by Soren Hansen
Status: Merged
Approved by: Vish Ishaya
Approved revision: 605
Merged at revision: 618
Proposed branch: lp:~soren/nova/lp661262
Merge into: lp:~hudson-openstack/nova/trunk
Diff against target: 131 lines (+47/-7)
7 files modified
nova/compute/manager.py (+1/-1)
nova/db/api.py (+5/-0)
nova/db/sqlalchemy/api.py (+11/-0)
nova/virt/fake.py (+3/-2)
nova/virt/hyperv.py (+1/-1)
nova/virt/libvirt_conn.py (+25/-2)
nova/virt/xenapi_conn.py (+1/-1)
To merge this branch: bzr merge lp:~soren/nova/lp661262
Reviewer        Review Type    Date Requested    Status
Devin Carlen    community                        Approve
Vish Ishaya     community                        Approve
Review via email: mp+47454@code.launchpad.net

Description of the change

Add a host argument to the virt drivers' init_host method. It will be set to the name of the host the driver is running on.

Make libvirt's init_host method look at which virtual machines are already running when the compute worker starts up.

This ensures firewalls are set up correctly for existing VMs. It also enables easier rolling upgrades.
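
For illustration, a minimal sketch of the new driver contract. ExampleConnection is hypothetical and not code from this branch (the real libvirt implementation is in the diff below); instance_get_all_by_host is the DB API helper this branch adds:

    # Hypothetical sketch of a virt driver under the new init_host(host)
    # contract; not code from this branch.
    from nova import context
    from nova import db


    class ExampleConnection(object):
        def __init__(self, firewall_driver):
            self.firewall_driver = firewall_driver

        def init_host(self, host):
            """Catch up with the VMs already running on `host` at startup."""
            ctxt = context.get_admin_context()
            for instance in db.instance_get_all_by_host(ctxt, host):
                # Re-establish per-instance firewall rules so existing VMs
                # keep their connectivity across a compute worker restart.
                self.firewall_driver.prepare_instance_filter(instance)
                self.firewall_driver.apply_instance_filter(instance)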

Vish Ishaya (vishvananda) wrote:

Very nice. Now all we need is to update the periodic callback to go through the instances and check the current state to catch crashes.

review: Approve
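
For illustration, a rough sketch of the periodic state sync suggested above. It is hypothetical and not part of this branch, though _update_state and instance_get_all_by_host both appear in the diff below:

    # Hypothetical periodic task for nova/compute/manager.py; assumes the
    # manager's existing self.db handle and _update_state() helper.
    def periodic_tasks(self, context):
        """Refresh the recorded state of every instance on this host so
        crashed VMs are noticed without waiting for a user request."""
        for instance in self.db.instance_get_all_by_host(context, self.host):
            # _update_state asks the virt driver for the current power state
            # and writes it back to the database.
            self._update_state(context, instance['id'])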
Devin Carlen (devcamcar) wrote:

lgtm

review: Approve

Preview Diff

=== modified file 'nova/compute/manager.py'
--- nova/compute/manager.py	2011-01-19 15:41:30 +0000
+++ nova/compute/manager.py	2011-01-25 19:56:14 +0000
@@ -118,7 +118,7 @@
         """Do any initialization that needs to be run if this is a
         standalone service.
         """
-        self.driver.init_host()
+        self.driver.init_host(host=self.host)

     def _update_state(self, context, instance_id):
         """Update the state of an instance from the driver info."""

=== modified file 'nova/db/api.py'
--- nova/db/api.py	2011-01-18 19:01:16 +0000
+++ nova/db/api.py	2011-01-25 19:56:14 +0000
@@ -351,6 +351,11 @@
     return IMPL.instance_get_all_by_project(context, project_id)


+def instance_get_all_by_host(context, host):
+    """Get all instances belonging to a host."""
+    return IMPL.instance_get_all_by_host(context, host)
+
+
 def instance_get_all_by_reservation(context, reservation_id):
     """Get all instances belonging to a reservation."""
     return IMPL.instance_get_all_by_reservation(context, reservation_id)

=== modified file 'nova/db/sqlalchemy/api.py'
--- nova/db/sqlalchemy/api.py	2011-01-21 21:10:26 +0000
+++ nova/db/sqlalchemy/api.py	2011-01-25 19:56:14 +0000
@@ -724,6 +724,17 @@
                    all()


+@require_admin_context
+def instance_get_all_by_host(context, host):
+    session = get_session()
+    return session.query(models.Instance).\
+                   options(joinedload_all('fixed_ip.floating_ips')).\
+                   options(joinedload('security_groups')).\
+                   filter_by(host=host).\
+                   filter_by(deleted=can_read_deleted(context)).\
+                   all()
+
+
 @require_context
 def instance_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)

=== modified file 'nova/virt/fake.py'
--- nova/virt/fake.py	2011-01-18 22:55:03 +0000
+++ nova/virt/fake.py	2011-01-25 19:56:14 +0000
@@ -76,9 +76,10 @@
             cls._instance = cls()
         return cls._instance

-    def init_host(self):
+    def init_host(self, host):
         """
-        Initialize anything that is necessary for the driver to function
+        Initialize anything that is necessary for the driver to function,
+        including catching up with currently running VMs on the given host.
         """
         return

=== modified file 'nova/virt/hyperv.py'
--- nova/virt/hyperv.py	2011-01-19 20:26:09 +0000
+++ nova/virt/hyperv.py	2011-01-25 19:56:14 +0000
@@ -113,7 +113,7 @@
         self._conn = wmi.WMI(moniker='//./root/virtualization')
         self._cim_conn = wmi.WMI(moniker='//./root/cimv2')

-    def init_host(self):
+    def init_host(self, host):
         #FIXME(chiradeep): implement this
         LOG.debug(_('In init host'))
         pass

=== modified file 'nova/virt/libvirt_conn.py'
--- nova/virt/libvirt_conn.py	2011-01-25 12:44:26 +0000
+++ nova/virt/libvirt_conn.py	2011-01-25 19:56:14 +0000
@@ -157,8 +157,31 @@
         else:
             self.firewall_driver = utils.import_object(FLAGS.firewall_driver)

-    def init_host(self):
-        pass
+    def init_host(self, host):
+        # Adopt existing VMs running here
+        ctxt = context.get_admin_context()
+        for instance in db.instance_get_all_by_host(ctxt, host):
+            try:
+                LOG.debug(_('Checking state of %s'), instance['name'])
+                state = self.get_info(instance['name'])['state']
+            except exception.NotFound:
+                state = power_state.SHUTOFF
+
+            LOG.debug(_('Current state of %(name)s was %(state)s.'),
+                      {'name': instance['name'], 'state': state})
+            db.instance_set_state(ctxt, instance['id'], state)
+
+            if state == power_state.SHUTOFF:
+                # TODO(soren): This is what the compute manager does when you
+                # terminate an instance. At some point I figure we'll have a
+                # "terminated" state and some sort of cleanup job that runs
+                # occasionally, cleaning them out.
+                db.instance_destroy(ctxt, instance['id'])
+
+            if state != power_state.RUNNING:
+                continue
+            self.firewall_driver.prepare_instance_filter(instance)
+            self.firewall_driver.apply_instance_filter(instance)

     def _get_connection(self):
         if not self._wrapped_conn or not self._test_connection():

=== modified file 'nova/virt/xenapi_conn.py'
--- nova/virt/xenapi_conn.py	2011-01-24 15:19:51 +0000
+++ nova/virt/xenapi_conn.py	2011-01-25 19:56:14 +0000
@@ -141,7 +141,7 @@
         self._vmops = VMOps(session)
         self._volumeops = VolumeOps(session)

-    def init_host(self):
+    def init_host(self, host):
         #FIXME(armando): implement this
         #NOTE(armando): would we need a method
         #to call when shutting down the host?