Merge lp:~cbehrens/nova/dist-sched-enhancements into lp:~hudson-openstack/nova/trunk
- dist-sched-enhancements
- Merge into trunk
Status: | Merged | ||||
---|---|---|---|---|---|
Approved by: | Josh Kearney | ||||
Approved revision: | 1229 | ||||
Merged at revision: | 1230 | ||||
Proposed branch: | lp:~cbehrens/nova/dist-sched-enhancements | ||||
Merge into: | lp:~hudson-openstack/nova/trunk | ||||
Diff against target: |
773 lines (+301/-138) 13 files modified
nova/api/openstack/create_instance_helper.py (+12/-1) nova/api/openstack/servers.py (+16/-8) nova/compute/api.py (+52/-38) nova/scheduler/api.py (+35/-14) nova/scheduler/host_filter.py (+5/-3) nova/scheduler/least_cost.py (+33/-15) nova/scheduler/zone_aware_scheduler.py (+96/-27) nova/tests/scheduler/test_least_cost_scheduler.py (+6/-5) nova/tests/scheduler/test_scheduler.py (+2/-2) nova/tests/scheduler/test_zone_aware_scheduler.py (+19/-24) nova/tests/test_utils.py (+13/-0) nova/utils.py (+11/-0) tools/pip-requires (+1/-1) |
||||
To merge this branch: | bzr merge lp:~cbehrens/nova/dist-sched-enhancements | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Josh Kearney (community) | Approve | ||
Rick Harris (community) | Approve | ||
Ed Leafe (community) | Approve | ||
Sandy Walsh | Pending | ||
Review via email: mp+66338@code.launchpad.net |
Commit message
Description of the change
compute_api.get_all should be able to recurse zones (bug 744217).
Also, allow building more than one instance at once with the zone_aware_scheduler.
Other cleanups with regards to zone aware scheduler...
Chris Behrens (cbehrens) wrote : | # |
Oops, yeah. 8-15 doesn't make complete sense, does it? hehe. Will fix.
45-51, 168-174: Good call.
485: Yeah, that message is not quite right. I originally had that message in weigh_hosts in least_cost.py where it made sense at the time.
Rick Harris (rconradharris) wrote : | # |
Looks solid overall.
> 13 + if not min_count:
> 14 + min_count = 1
> 15 + else:
> 16 + min_count = int(min_count)
This looks like a double-negative: if NOT then do this; if NOT NOT then do that. Might be clearer as:
if min_count:
min_count = int(min_count)
else:
min_count = 1
or even:
min_count = int(min_count) if min_count else 1
This is the kind of nit which is personal-preference so feel free to disregard
if you don't agree :)
> 50 + recurse_zones = query_str.get('recurse_zones')
> 51 + recurse_zones = recurse_zones and True or False
Won't this mean that if you pass 'False' or '0', recurse_zones is interpreted
as True? Is that expected behavior?
If we don't already have anything in utils yet, might be worth adding a
bool_from_str function. Something like:
def bool_from_str(val):
return val.lower() in ('true', '1')
recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
Chris Behrens (cbehrens) wrote : | # |
Ah, the recurse_zones thing.. I mistakenly was thinking the variable was an int value. But you're right. It's a string. Probably worth adding that call in utils.
I'll fix up the min_count thing, also.. I tend to like using a positive expression when there's an 'else' case as well..
Rick Harris (rconradharris) wrote : | # |
Looks good; thanks for adding the bool_from_str tests.
Preview Diff
1 | === modified file 'nova/api/openstack/create_instance_helper.py' |
2 | --- nova/api/openstack/create_instance_helper.py 2011-06-17 17:17:14 +0000 |
3 | +++ nova/api/openstack/create_instance_helper.py 2011-06-29 22:27:29 +0000 |
4 | @@ -114,6 +114,15 @@ |
5 | name = name.strip() |
6 | |
7 | reservation_id = body['server'].get('reservation_id') |
8 | + min_count = body['server'].get('min_count') |
9 | + max_count = body['server'].get('max_count') |
10 | + # min_count and max_count are optional. If they exist, they come |
11 | + # in as strings. We want to default 'min_count' to 1, and default |
12 | + # 'max_count' to be 'min_count'. |
13 | + min_count = int(min_count) if min_count else 1 |
14 | + max_count = int(max_count) if max_count else min_count |
15 | + if min_count > max_count: |
16 | + min_count = max_count |
17 | |
18 | try: |
19 | inst_type = \ |
20 | @@ -137,7 +146,9 @@ |
21 | injected_files=injected_files, |
22 | admin_password=password, |
23 | zone_blob=zone_blob, |
24 | - reservation_id=reservation_id)) |
25 | + reservation_id=reservation_id, |
26 | + min_count=min_count, |
27 | + max_count=max_count)) |
28 | except quota.QuotaError as error: |
29 | self._handle_quota_error(error) |
30 | except exception.ImageNotFound as error: |
31 | |
32 | === modified file 'nova/api/openstack/servers.py' |
33 | --- nova/api/openstack/servers.py 2011-06-15 20:58:55 +0000 |
34 | +++ nova/api/openstack/servers.py 2011-06-29 22:27:29 +0000 |
35 | @@ -76,10 +76,17 @@ |
36 | |
37 | builder - the response model builder |
38 | """ |
39 | - reservation_id = req.str_GET.get('reservation_id') |
40 | + query_str = req.str_GET |
41 | + reservation_id = query_str.get('reservation_id') |
42 | + project_id = query_str.get('project_id') |
43 | + fixed_ip = query_str.get('fixed_ip') |
44 | + recurse_zones = utils.bool_from_str(query_str.get('recurse_zones')) |
45 | instance_list = self.compute_api.get_all( |
46 | - req.environ['nova.context'], |
47 | - reservation_id=reservation_id) |
48 | + req.environ['nova.context'], |
49 | + reservation_id=reservation_id, |
50 | + project_id=project_id, |
51 | + fixed_ip=fixed_ip, |
52 | + recurse_zones=recurse_zones) |
53 | limited_list = self._limit_items(instance_list, req) |
54 | builder = self._get_view_builder(req) |
55 | servers = [builder.build(inst, is_detail)['server'] |
56 | @@ -111,14 +118,15 @@ |
57 | extra_values = None |
58 | result = None |
59 | try: |
60 | - extra_values, result = self.helper.create_instance( |
61 | - req, body, self.compute_api.create) |
62 | + extra_values, instances = self.helper.create_instance( |
63 | + req, body, self.compute_api.create) |
64 | except faults.Fault, f: |
65 | return f |
66 | |
67 | - instances = result |
68 | - |
69 | - (inst, ) = instances |
70 | + # We can only return 1 instance via the API, if we happen to |
71 | + # build more than one... instances is a list, so we'll just |
72 | + # use the first one.. |
73 | + inst = instances[0] |
74 | for key in ['instance_type', 'image_ref']: |
75 | inst[key] = extra_values[key] |
76 | |
77 | |
78 | === modified file 'nova/compute/api.py' |
79 | --- nova/compute/api.py 2011-06-28 22:05:41 +0000 |
80 | +++ nova/compute/api.py 2011-06-29 22:27:29 +0000 |
81 | @@ -143,7 +143,7 @@ |
82 | |
83 | def _check_create_parameters(self, context, instance_type, |
84 | image_href, kernel_id=None, ramdisk_id=None, |
85 | - min_count=1, max_count=1, |
86 | + min_count=None, max_count=None, |
87 | display_name='', display_description='', |
88 | key_name=None, key_data=None, security_group='default', |
89 | availability_zone=None, user_data=None, metadata={}, |
90 | @@ -154,6 +154,10 @@ |
91 | |
92 | if not instance_type: |
93 | instance_type = instance_types.get_default_instance_type() |
94 | + if not min_count: |
95 | + min_count = 1 |
96 | + if not max_count: |
97 | + max_count = min_count |
98 | |
99 | num_instances = quota.allowed_instances(context, max_count, |
100 | instance_type) |
101 | @@ -338,7 +342,7 @@ |
102 | |
103 | def create_all_at_once(self, context, instance_type, |
104 | image_href, kernel_id=None, ramdisk_id=None, |
105 | - min_count=1, max_count=1, |
106 | + min_count=None, max_count=None, |
107 | display_name='', display_description='', |
108 | key_name=None, key_data=None, security_group='default', |
109 | availability_zone=None, user_data=None, metadata={}, |
110 | @@ -368,7 +372,7 @@ |
111 | |
112 | def create(self, context, instance_type, |
113 | image_href, kernel_id=None, ramdisk_id=None, |
114 | - min_count=1, max_count=1, |
115 | + min_count=None, max_count=None, |
116 | display_name='', display_description='', |
117 | key_name=None, key_data=None, security_group='default', |
118 | availability_zone=None, user_data=None, metadata={}, |
119 | @@ -613,17 +617,53 @@ |
120 | """ |
121 | return self.get(context, instance_id) |
122 | |
123 | - def get_all_across_zones(self, context, reservation_id): |
124 | - """Get all instances with this reservation_id, across |
125 | - all available Zones (if any). |
126 | + def get_all(self, context, project_id=None, reservation_id=None, |
127 | + fixed_ip=None, recurse_zones=False): |
128 | + """Get all instances filtered by one of the given parameters. |
129 | + |
130 | + If there is no filter and the context is an admin, it will retreive |
131 | + all instances in the system. |
132 | """ |
133 | - context = context.elevated() |
134 | - instances = self.db.instance_get_all_by_reservation( |
135 | + |
136 | + if reservation_id is not None: |
137 | + recurse_zones = True |
138 | + instances = self.db.instance_get_all_by_reservation( |
139 | context, reservation_id) |
140 | - |
141 | - children = scheduler_api.call_zone_method(context, "list", |
142 | - novaclient_collection_name="servers", |
143 | - reservation_id=reservation_id) |
144 | + elif fixed_ip is not None: |
145 | + try: |
146 | + instances = self.db.fixed_ip_get_instance(context, fixed_ip) |
147 | + except exception.FloatingIpNotFound, e: |
148 | + if not recurse_zones: |
149 | + raise |
150 | + instances = None |
151 | + elif project_id or not context.is_admin: |
152 | + if not context.project: |
153 | + instances = self.db.instance_get_all_by_user( |
154 | + context, context.user_id) |
155 | + else: |
156 | + if project_id is None: |
157 | + project_id = context.project_id |
158 | + instances = self.db.instance_get_all_by_project( |
159 | + context, project_id) |
160 | + else: |
161 | + instances = self.db.instance_get_all(context) |
162 | + |
163 | + if instances is None: |
164 | + instances = [] |
165 | + elif not isinstance(instances, list): |
166 | + instances = [instances] |
167 | + |
168 | + if not recurse_zones: |
169 | + return instances |
170 | + |
171 | + admin_context = context.elevated() |
172 | + children = scheduler_api.call_zone_method(admin_context, |
173 | + "list", |
174 | + novaclient_collection_name="servers", |
175 | + reservation_id=reservation_id, |
176 | + project_id=project_id, |
177 | + fixed_ip=fixed_ip, |
178 | + recurse_zones=True) |
179 | |
180 | for zone, servers in children: |
181 | for server in servers: |
182 | @@ -632,32 +672,6 @@ |
183 | instances.append(server._info) |
184 | return instances |
185 | |
186 | - def get_all(self, context, project_id=None, reservation_id=None, |
187 | - fixed_ip=None): |
188 | - """Get all instances filtered by one of the given parameters. |
189 | - |
190 | - If there is no filter and the context is an admin, it will retreive |
191 | - all instances in the system. |
192 | - """ |
193 | - if reservation_id is not None: |
194 | - return self.get_all_across_zones(context, reservation_id) |
195 | - |
196 | - if fixed_ip is not None: |
197 | - return self.db.fixed_ip_get_instance(context, fixed_ip) |
198 | - |
199 | - if project_id or not context.is_admin: |
200 | - if not context.project: |
201 | - return self.db.instance_get_all_by_user( |
202 | - context, context.user_id) |
203 | - |
204 | - if project_id is None: |
205 | - project_id = context.project_id |
206 | - |
207 | - return self.db.instance_get_all_by_project( |
208 | - context, project_id) |
209 | - |
210 | - return self.db.instance_get_all(context) |
211 | - |
212 | def _cast_compute_message(self, method, context, instance_id, host=None, |
213 | params=None): |
214 | """Generic handler for RPC casts to compute. |
215 | |
216 | === modified file 'nova/scheduler/api.py' |
217 | --- nova/scheduler/api.py 2011-06-24 12:01:51 +0000 |
218 | +++ nova/scheduler/api.py 2011-06-29 22:27:29 +0000 |
219 | @@ -162,32 +162,53 @@ |
220 | _wrap_method(_process, func), zone_list)] |
221 | |
222 | |
223 | -def _issue_novaclient_command(nova, zone, collection, method_name, item_id): |
224 | +def _issue_novaclient_command(nova, zone, collection, |
225 | + method_name, *args, **kwargs): |
226 | """Use novaclient to issue command to a single child zone. |
227 | - One of these will be run in parallel for each child zone.""" |
228 | + One of these will be run in parallel for each child zone. |
229 | + """ |
230 | manager = getattr(nova, collection) |
231 | - result = None |
232 | + |
233 | + # NOTE(comstud): This is not ideal, but we have to do this based on |
234 | + # how novaclient is implemented right now. |
235 | + # 'find' is special cased as novaclient requires kwargs for it to |
236 | + # filter on a 'get_all'. |
237 | + # Every other method first needs to do a 'get' on the first argument |
238 | + # passed, which should be a UUID. If it's 'get' itself that we want, |
239 | + # we just return the result. Otherwise, we next call the real method |
240 | + # that's wanted... passing other arguments that may or may not exist. |
241 | + if method_name in ['find', 'findall']: |
242 | + try: |
243 | + return getattr(manager, method_name)(**kwargs) |
244 | + except novaclient.NotFound: |
245 | + url = zone.api_url |
246 | + LOG.debug(_("%(collection)s.%(method_name)s didn't find " |
247 | + "anything matching '%(kwargs)s' on '%(url)s'" % |
248 | + locals())) |
249 | + return None |
250 | + |
251 | + args = list(args) |
252 | + # pop off the UUID to look up |
253 | + item = args.pop(0) |
254 | try: |
255 | - try: |
256 | - result = manager.get(int(item_id)) |
257 | - except ValueError, e: |
258 | - result = manager.find(name=item_id) |
259 | + result = manager.get(item) |
260 | except novaclient.NotFound: |
261 | url = zone.api_url |
262 | - LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % |
263 | + LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % |
264 | locals())) |
265 | return None |
266 | |
267 | - if method_name.lower() not in ['get', 'find']: |
268 | - result = getattr(result, method_name)() |
269 | + if method_name.lower() != 'get': |
270 | + # if we're doing something other than 'get', call it passing args. |
271 | + result = getattr(result, method_name)(*args, **kwargs) |
272 | return result |
273 | |
274 | |
275 | -def wrap_novaclient_function(f, collection, method_name, item_id): |
276 | - """Appends collection, method_name and item_id to the incoming |
277 | +def wrap_novaclient_function(f, collection, method_name, *args, **kwargs): |
278 | + """Appends collection, method_name and arguments to the incoming |
279 | (nova, zone) call from child_zone_helper.""" |
280 | def inner(nova, zone): |
281 | - return f(nova, zone, collection, method_name, item_id) |
282 | + return f(nova, zone, collection, method_name, *args, **kwargs) |
283 | |
284 | return inner |
285 | |
286 | @@ -220,7 +241,7 @@ |
287 | the wrapped method. (This ensures that zone-local code can |
288 | continue to use integer IDs). |
289 | |
290 | - 4. If the item was not found, we delgate the call to a child zone |
291 | + 4. If the item was not found, we delegate the call to a child zone |
292 | using the UUID. |
293 | """ |
294 | def __init__(self, method_name): |
295 | |
296 | === modified file 'nova/scheduler/host_filter.py' |
297 | --- nova/scheduler/host_filter.py 2011-06-24 14:52:59 +0000 |
298 | +++ nova/scheduler/host_filter.py 2011-06-29 22:27:29 +0000 |
299 | @@ -329,8 +329,9 @@ |
300 | 'instance_type': <InstanceType dict>} |
301 | """ |
302 | |
303 | - def filter_hosts(self, num, request_spec): |
304 | + def filter_hosts(self, topic, request_spec, hosts=None): |
305 | """Filter the full host list (from the ZoneManager)""" |
306 | + |
307 | filter_name = request_spec.get('filter', None) |
308 | host_filter = choose_host_filter(filter_name) |
309 | |
310 | @@ -341,8 +342,9 @@ |
311 | name, query = host_filter.instance_type_to_filter(instance_type) |
312 | return host_filter.filter_hosts(self.zone_manager, query) |
313 | |
314 | - def weigh_hosts(self, num, request_spec, hosts): |
315 | + def weigh_hosts(self, topic, request_spec, hosts): |
316 | """Derived classes must override this method and return |
317 | a lists of hosts in [{weight, hostname}] format. |
318 | """ |
319 | - return [dict(weight=1, hostname=host) for host, caps in hosts] |
320 | + return [dict(weight=1, hostname=hostname, capabilities=caps) |
321 | + for hostname, caps in hosts] |
322 | |
323 | === modified file 'nova/scheduler/least_cost.py' |
324 | --- nova/scheduler/least_cost.py 2011-05-17 23:15:31 +0000 |
325 | +++ nova/scheduler/least_cost.py 2011-06-29 22:27:29 +0000 |
326 | @@ -48,25 +48,43 @@ |
327 | return 1 |
328 | |
329 | |
330 | -flags.DEFINE_integer('fill_first_cost_fn_weight', 1, |
331 | +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, |
332 | 'How much weight to give the fill-first cost function') |
333 | |
334 | |
335 | -def fill_first_cost_fn(host): |
336 | +def compute_fill_first_cost_fn(host): |
337 | """Prefer hosts that have less ram available, filter_hosts will exclude |
338 | hosts that don't have enough ram""" |
339 | hostname, caps = host |
340 | - free_mem = caps['compute']['host_memory_free'] |
341 | + free_mem = caps['host_memory_free'] |
342 | return free_mem |
343 | |
344 | |
345 | class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): |
346 | - def get_cost_fns(self): |
347 | + def __init__(self, *args, **kwargs): |
348 | + self.cost_fns_cache = {} |
349 | + super(LeastCostScheduler, self).__init__(*args, **kwargs) |
350 | + |
351 | + def get_cost_fns(self, topic): |
352 | """Returns a list of tuples containing weights and cost functions to |
353 | use for weighing hosts |
354 | """ |
355 | + |
356 | + if topic in self.cost_fns_cache: |
357 | + return self.cost_fns_cache[topic] |
358 | + |
359 | cost_fns = [] |
360 | for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: |
361 | + if '.' in cost_fn_str: |
362 | + short_name = cost_fn_str.split('.')[-1] |
363 | + else: |
364 | + short_name = cost_fn_str |
365 | + cost_fn_str = "%s.%s.%s" % ( |
366 | + __name__, self.__class__.__name__, short_name) |
367 | + |
368 | + if not (short_name.startswith('%s_' % topic) or |
369 | + short_name.startswith('noop')): |
370 | + continue |
371 | |
372 | try: |
373 | # NOTE(sirp): import_class is somewhat misnamed since it can |
374 | @@ -84,23 +102,23 @@ |
375 | |
376 | cost_fns.append((weight, cost_fn)) |
377 | |
378 | + self.cost_fns_cache[topic] = cost_fns |
379 | return cost_fns |
380 | |
381 | - def weigh_hosts(self, num, request_spec, hosts): |
382 | + def weigh_hosts(self, topic, request_spec, hosts): |
383 | """Returns a list of dictionaries of form: |
384 | - [ {weight: weight, hostname: hostname} ]""" |
385 | - |
386 | - # FIXME(sirp): weigh_hosts should handle more than just instances |
387 | - hostnames = [hostname for hostname, caps in hosts] |
388 | - |
389 | - cost_fns = self.get_cost_fns() |
390 | + [ {weight: weight, hostname: hostname, capabilities: capabs} ] |
391 | + """ |
392 | + |
393 | + cost_fns = self.get_cost_fns(topic) |
394 | costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) |
395 | |
396 | weighted = [] |
397 | weight_log = [] |
398 | - for cost, hostname in zip(costs, hostnames): |
399 | + for cost, (hostname, caps) in zip(costs, hosts): |
400 | weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) |
401 | - weight_dict = dict(weight=cost, hostname=hostname) |
402 | + weight_dict = dict(weight=cost, hostname=hostname, |
403 | + capabilities=caps) |
404 | weighted.append(weight_dict) |
405 | |
406 | LOG.debug(_("Weighted Costs => %s") % weight_log) |
407 | @@ -127,7 +145,8 @@ |
408 | weighted_fns - list of weights and functions like: |
409 | [(weight, objective-functions)] |
410 | |
411 | - Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts) |
412 | + Returns an unsorted list of scores. To pair with hosts do: |
413 | + zip(scores, hosts) |
414 | """ |
415 | # Table of form: |
416 | # { domain1: [score1, score2, ..., scoreM] |
417 | @@ -150,7 +169,6 @@ |
418 | domain_scores = [] |
419 | for idx in sorted(score_table): |
420 | elem_score = sum(score_table[idx]) |
421 | - elem = domain[idx] |
422 | domain_scores.append(elem_score) |
423 | |
424 | return domain_scores |
425 | |
426 | === modified file 'nova/scheduler/zone_aware_scheduler.py' |
427 | --- nova/scheduler/zone_aware_scheduler.py 2011-06-15 17:21:41 +0000 |
428 | +++ nova/scheduler/zone_aware_scheduler.py 2011-06-29 22:27:29 +0000 |
429 | @@ -180,18 +180,22 @@ |
430 | request_spec, kwargs) |
431 | return None |
432 | |
433 | + num_instances = request_spec.get('num_instances', 1) |
434 | + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % |
435 | + locals()) |
436 | + |
437 | # Create build plan and provision ... |
438 | build_plan = self.select(context, request_spec) |
439 | if not build_plan: |
440 | raise driver.NoValidHost(_('No hosts were available')) |
441 | |
442 | - for num in xrange(request_spec['num_instances']): |
443 | + for num in xrange(num_instances): |
444 | if not build_plan: |
445 | break |
446 | |
447 | - item = build_plan.pop(0) |
448 | - self._provision_resource(context, item, instance_id, request_spec, |
449 | - kwargs) |
450 | + build_plan_item = build_plan.pop(0) |
451 | + self._provision_resource(context, build_plan_item, instance_id, |
452 | + request_spec, kwargs) |
453 | |
454 | # Returning None short-circuits the routing to Compute (since |
455 | # we've already done it here) |
456 | @@ -224,18 +228,36 @@ |
457 | raise NotImplemented(_("Zone Aware Scheduler only understands " |
458 | "Compute nodes (for now)")) |
459 | |
460 | - #TODO(sandy): how to infer this from OS API params? |
461 | - num_instances = 1 |
462 | - |
463 | - # Filter local hosts based on requirements ... |
464 | - host_list = self.filter_hosts(num_instances, request_spec) |
465 | - |
466 | - # TODO(sirp): weigh_hosts should also be a function of 'topic' or |
467 | - # resources, so that we can apply different objective functions to it |
468 | - |
469 | - # then weigh the selected hosts. |
470 | - # weighted = [{weight=weight, name=hostname}, ...] |
471 | - weighted = self.weigh_hosts(num_instances, request_spec, host_list) |
472 | + num_instances = request_spec.get('num_instances', 1) |
473 | + instance_type = request_spec['instance_type'] |
474 | + |
475 | + weighted = [] |
476 | + host_list = None |
477 | + |
478 | + for i in xrange(num_instances): |
479 | + # Filter local hosts based on requirements ... |
480 | + # |
481 | + # The first pass through here will pass 'None' as the |
482 | + # host_list.. which tells the filter to build the full |
483 | + # list of hosts. |
484 | + # On a 2nd pass, the filter can modify the host_list with |
485 | + # any updates it needs to make based on resources that |
486 | + # may have been consumed from a previous build.. |
487 | + host_list = self.filter_hosts(topic, request_spec, host_list) |
488 | + if not host_list: |
489 | + LOG.warn(_("Filter returned no hosts after processing " |
490 | + "%(i)d of %(num_instances)d instances") % locals()) |
491 | + break |
492 | + |
493 | + # then weigh the selected hosts. |
494 | + # weighted = [{weight=weight, hostname=hostname, |
495 | + # capabilities=capabs}, ...] |
496 | + weights = self.weigh_hosts(topic, request_spec, host_list) |
497 | + weights.sort(key=operator.itemgetter('weight')) |
498 | + best_weight = weights[0] |
499 | + weighted.append(best_weight) |
500 | + self.consume_resources(topic, best_weight['capabilities'], |
501 | + instance_type) |
502 | |
503 | # Next, tack on the best weights from the child zones ... |
504 | json_spec = json.dumps(request_spec) |
505 | @@ -254,18 +276,65 @@ |
506 | weighted.sort(key=operator.itemgetter('weight')) |
507 | return weighted |
508 | |
509 | - def filter_hosts(self, num, request_spec): |
510 | - """Derived classes must override this method and return |
511 | - a list of hosts in [(hostname, capability_dict)] format. |
512 | - """ |
513 | - # NOTE(sirp): The default logic is the equivalent to AllHostsFilter |
514 | - service_states = self.zone_manager.service_states |
515 | - return [(host, services) |
516 | - for host, services in service_states.iteritems()] |
517 | - |
518 | - def weigh_hosts(self, num, request_spec, hosts): |
519 | + def compute_filter(self, hostname, capabilities, request_spec): |
520 | + """Return whether or not we can schedule to this compute node. |
521 | + Derived classes should override this and return True if the host |
522 | + is acceptable for scheduling. |
523 | + """ |
524 | + instance_type = request_spec['instance_type'] |
525 | + requested_mem = instance_type['memory_mb'] * 1024 * 1024 |
526 | + return capabilities['host_memory_free'] >= requested_mem |
527 | + |
528 | + def filter_hosts(self, topic, request_spec, host_list=None): |
529 | + """Return a list of hosts which are acceptable for scheduling. |
530 | + Return value should be a list of (hostname, capability_dict)s. |
531 | + Derived classes may override this, but may find the |
532 | + '<topic>_filter' function more appropriate. |
533 | + """ |
534 | + |
535 | + def _default_filter(self, hostname, capabilities, request_spec): |
536 | + """Default filter function if there's no <topic>_filter""" |
537 | + # NOTE(sirp): The default logic is the equivalent to |
538 | + # AllHostsFilter |
539 | + return True |
540 | + |
541 | + filter_func = getattr(self, '%s_filter' % topic, _default_filter) |
542 | + |
543 | + if host_list is None: |
544 | + first_run = True |
545 | + host_list = self.zone_manager.service_states.iteritems() |
546 | + else: |
547 | + first_run = False |
548 | + |
549 | + filtered_hosts = [] |
550 | + for host, services in host_list: |
551 | + if first_run: |
552 | + if topic not in services: |
553 | + continue |
554 | + services = services[topic] |
555 | + if filter_func(host, services, request_spec): |
556 | + filtered_hosts.append((host, services)) |
557 | + return filtered_hosts |
558 | + |
559 | + def weigh_hosts(self, topic, request_spec, hosts): |
560 | """Derived classes may override this to provide more sophisticated |
561 | scheduling objectives |
562 | """ |
563 | # NOTE(sirp): The default logic is the same as the NoopCostFunction |
564 | - return [dict(weight=1, hostname=host) for host, caps in hosts] |
565 | + return [dict(weight=1, hostname=hostname, capabilities=capabilities) |
566 | + for hostname, capabilities in hosts] |
567 | + |
568 | + def compute_consume(self, capabilities, instance_type): |
569 | + """Consume compute resources for selected host""" |
570 | + |
571 | + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 |
572 | + capabilities['host_memory_free'] -= requested_mem |
573 | + |
574 | + def consume_resources(self, topic, capabilities, instance_type): |
575 | + """Consume resources for a specific host. 'host' is a tuple |
576 | + of the hostname and the services""" |
577 | + |
578 | + consume_func = getattr(self, '%s_consume' % topic, None) |
579 | + if not consume_func: |
580 | + return |
581 | + consume_func(capabilities, instance_type) |
582 | |
583 | === modified file 'nova/tests/scheduler/test_least_cost_scheduler.py' |
584 | --- nova/tests/scheduler/test_least_cost_scheduler.py 2011-06-14 01:14:26 +0000 |
585 | +++ nova/tests/scheduler/test_least_cost_scheduler.py 2011-06-29 22:27:29 +0000 |
586 | @@ -122,15 +122,16 @@ |
587 | for hostname, caps in hosts] |
588 | self.assertWeights(expected, num, request_spec, hosts) |
589 | |
590 | - def test_fill_first_cost_fn(self): |
591 | + def test_compute_fill_first_cost_fn(self): |
592 | FLAGS.least_cost_scheduler_cost_functions = [ |
593 | - 'nova.scheduler.least_cost.fill_first_cost_fn', |
594 | + 'nova.scheduler.least_cost.compute_fill_first_cost_fn', |
595 | ] |
596 | - FLAGS.fill_first_cost_fn_weight = 1 |
597 | + FLAGS.compute_fill_first_cost_fn_weight = 1 |
598 | |
599 | num = 1 |
600 | - request_spec = {} |
601 | - hosts = self.sched.filter_hosts(num, request_spec) |
602 | + instance_type = {'memory_mb': 1024} |
603 | + request_spec = {'instance_type': instance_type} |
604 | + hosts = self.sched.filter_hosts('compute', request_spec, None) |
605 | |
606 | expected = [] |
607 | for idx, (hostname, caps) in enumerate(hosts): |
608 | |
609 | === modified file 'nova/tests/scheduler/test_scheduler.py' |
610 | --- nova/tests/scheduler/test_scheduler.py 2011-06-24 12:01:51 +0000 |
611 | +++ nova/tests/scheduler/test_scheduler.py 2011-06-29 22:27:29 +0000 |
612 | @@ -1074,7 +1074,7 @@ |
613 | |
614 | self.assertEquals(api._issue_novaclient_command( |
615 | FakeNovaClient(FakeServerCollection()), |
616 | - zone, "servers", "find", "name").b, 22) |
617 | + zone, "servers", "find", name="test").b, 22) |
618 | |
619 | self.assertEquals(api._issue_novaclient_command( |
620 | FakeNovaClient(FakeServerCollection()), |
621 | @@ -1088,7 +1088,7 @@ |
622 | |
623 | self.assertEquals(api._issue_novaclient_command( |
624 | FakeNovaClient(FakeEmptyServerCollection()), |
625 | - zone, "servers", "find", "name"), None) |
626 | + zone, "servers", "find", name="test"), None) |
627 | |
628 | self.assertEquals(api._issue_novaclient_command( |
629 | FakeNovaClient(FakeEmptyServerCollection()), |
630 | |
631 | === modified file 'nova/tests/scheduler/test_zone_aware_scheduler.py' |
632 | --- nova/tests/scheduler/test_zone_aware_scheduler.py 2011-06-14 01:14:26 +0000 |
633 | +++ nova/tests/scheduler/test_zone_aware_scheduler.py 2011-06-29 22:27:29 +0000 |
634 | @@ -55,29 +55,21 @@ |
635 | |
636 | |
637 | class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): |
638 | - def filter_hosts(self, num, specs): |
639 | - # NOTE(sirp): this is returning [(hostname, services)] |
640 | - return self.zone_manager.service_states.items() |
641 | - |
642 | - def weigh_hosts(self, num, specs, hosts): |
643 | - fake_weight = 99 |
644 | - weighted = [] |
645 | - for hostname, caps in hosts: |
646 | - weighted.append(dict(weight=fake_weight, name=hostname)) |
647 | - return weighted |
648 | + # No need to stub anything at the moment |
649 | + pass |
650 | |
651 | |
652 | class FakeZoneManager(zone_manager.ZoneManager): |
653 | def __init__(self): |
654 | self.service_states = { |
655 | 'host1': { |
656 | - 'compute': {'ram': 1000}, |
657 | + 'compute': {'host_memory_free': 1073741824}, |
658 | }, |
659 | 'host2': { |
660 | - 'compute': {'ram': 2000}, |
661 | + 'compute': {'host_memory_free': 2147483648}, |
662 | }, |
663 | 'host3': { |
664 | - 'compute': {'ram': 3000}, |
665 | + 'compute': {'host_memory_free': 3221225472}, |
666 | }, |
667 | } |
668 | |
669 | @@ -154,8 +146,8 @@ |
670 | |
671 | def test_zone_aware_scheduler(self): |
672 | """ |
673 | - Create a nested set of FakeZones, ensure that a select call returns the |
674 | - appropriate build plan. |
675 | + Create a nested set of FakeZones, try to build multiple instances |
676 | + and ensure that a select call returns the appropriate build plan. |
677 | """ |
678 | sched = FakeZoneAwareScheduler() |
679 | self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) |
680 | @@ -164,13 +156,17 @@ |
681 | sched.set_zone_manager(zm) |
682 | |
683 | fake_context = {} |
684 | - build_plan = sched.select(fake_context, {}) |
685 | - |
686 | - self.assertEqual(15, len(build_plan)) |
687 | - |
688 | - hostnames = [plan_item['name'] |
689 | - for plan_item in build_plan if 'name' in plan_item] |
690 | - self.assertEqual(3, len(hostnames)) |
691 | + build_plan = sched.select(fake_context, |
692 | + {'instance_type': {'memory_mb': 512}, |
693 | + 'num_instances': 4}) |
694 | + |
695 | + # 4 from local zones, 12 from remotes |
696 | + self.assertEqual(16, len(build_plan)) |
697 | + |
698 | + hostnames = [plan_item['hostname'] |
699 | + for plan_item in build_plan if 'hostname' in plan_item] |
700 | + # 4 local hosts |
701 | + self.assertEqual(4, len(hostnames)) |
702 | |
703 | def test_empty_zone_aware_scheduler(self): |
704 | """ |
705 | @@ -185,8 +181,7 @@ |
706 | fake_context = {} |
707 | self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, |
708 | fake_context, 1, |
709 | - dict(host_filter=None, |
710 | - request_spec={'instance_type': {}})) |
711 | + dict(host_filter=None, instance_type={})) |
712 | |
713 | def test_schedule_do_not_schedule_with_hint(self): |
714 | """ |
715 | |
716 | === modified file 'nova/tests/test_utils.py' |
717 | --- nova/tests/test_utils.py 2011-06-24 12:01:51 +0000 |
718 | +++ nova/tests/test_utils.py 2011-06-29 22:27:29 +0000 |
719 | @@ -276,6 +276,19 @@ |
720 | result = utils.parse_server_string('www.exa:mple.com:8443') |
721 | self.assertEqual(('', ''), result) |
722 | |
723 | + def test_bool_from_str(self): |
724 | + self.assertTrue(utils.bool_from_str('1')) |
725 | + self.assertTrue(utils.bool_from_str('2')) |
726 | + self.assertTrue(utils.bool_from_str('-1')) |
727 | + self.assertTrue(utils.bool_from_str('true')) |
728 | + self.assertTrue(utils.bool_from_str('True')) |
729 | + self.assertTrue(utils.bool_from_str('tRuE')) |
730 | + self.assertFalse(utils.bool_from_str('False')) |
731 | + self.assertFalse(utils.bool_from_str('false')) |
732 | + self.assertFalse(utils.bool_from_str('0')) |
733 | + self.assertFalse(utils.bool_from_str(None)) |
734 | + self.assertFalse(utils.bool_from_str('junk')) |
735 | + |
736 | |
737 | class IsUUIDLikeTestCase(test.TestCase): |
738 | def assertUUIDLike(self, val, expected): |
739 | |
740 | === modified file 'nova/utils.py' |
741 | --- nova/utils.py 2011-06-28 20:53:45 +0000 |
742 | +++ nova/utils.py 2011-06-29 22:27:29 +0000 |
743 | @@ -772,6 +772,17 @@ |
744 | return (len(val) == 36) and (val.count('-') == 4) |
745 | |
746 | |
747 | +def bool_from_str(val): |
748 | + """Convert a string representation of a bool into a bool value""" |
749 | + |
750 | + if not val: |
751 | + return False |
752 | + try: |
753 | + return True if int(val) else False |
754 | + except ValueError: |
755 | + return val.lower() == 'true' |
756 | + |
757 | + |
758 | class Bootstrapper(object): |
759 | """Provides environment bootstrapping capabilities for entry points.""" |
760 | |
761 | |
762 | === modified file 'tools/pip-requires' |
763 | --- tools/pip-requires 2011-06-28 15:13:52 +0000 |
764 | +++ tools/pip-requires 2011-06-29 22:27:29 +0000 |
765 | @@ -9,7 +9,7 @@ |
766 | carrot==0.10.5 |
767 | eventlet |
768 | lockfile==0.8 |
769 | -python-novaclient==2.5.3 |
770 | +python-novaclient==2.5.7 |
771 | python-daemon==1.5.5 |
772 | python-gflags==1.3 |
773 | redis==2.0.0 |
Lines 8-15: if min_count is specified, but no max_count, then at line 14, min_count will be some integer, and max_count will be None. Line 15 will then set min_count to None, too. Is that the desired behavior? Wouldn't the logic used in lines 94-95 make more sense?
Lines 45-51: as long as you're changing that line, can you fix the wonky indentation of the continued lines to use one of the standards for nova? Two levels (i.e., 8 spaces) is preferred, but you can also do the 'align with the item in the main line' approach. The current indentation uses something apparently random.
Lines 168-174: same as above.
Line 485: The log message sounds incorrect; instead of saying "ran out of hosts after weighing", it should probably say "no available hosts after filtering" or something like that.