Merge lp:~lamont/maas/raid10 into lp:~maas-committers/maas/trunk
- raid10
- Merge into trunk
Proposed by
LaMont Jones
Status: Merged
Approved by: LaMont Jones
Approved revision: no longer in the source branch.
Merged at revision: 4432
Proposed branch: lp:~lamont/maas/raid10
Merge into: lp:~maas-committers/maas/trunk
Diff against target: |
533 lines (+275/-8) 9 files modified
src/maasserver/api/tests/test_raid.py (+74/-3) src/maasserver/enum.py (+5/-0) src/maasserver/models/filesystemgroup.py (+9/-0) src/maasserver/models/tests/test_filesystemgroup.py (+119/-3) src/maasserver/preseed_storage.py (+1/-0) src/maasserver/static/js/angular/controllers/node_details_storage.js (+9/-0) src/maasserver/static/js/angular/controllers/tests/test_node_details_storage.js (+45/-2) src/maasserver/testing/factory.py (+12/-0) src/maasserver/tests/test_forms_raid.py (+1/-0) |
||||
To merge this branch: | bzr merge lp:~lamont/maas/raid10 | ||||
Related bugs: |
|
Reviewer: Blake Rouse (community) — Review Type: (none) — Status: Approve
Review via email: mp+276058@code.launchpad.net |
Commit message
Add RAID 10 support to storage.
Description of the change
Add RAID 10 support to storage.
To post a comment you must log in.
Revision history for this message
LaMont Jones (lamont) wrote : | # |
> At quick glance looks to be missing unit tests for the JS.
Good catch. Added those, and updated them to fail if someone adds another raid type without fixing them.
Revision history for this message
Blake Rouse (blake-rouse) wrote : | # |
Looks really good!
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'src/maasserver/api/tests/test_raid.py' |
2 | --- src/maasserver/api/tests/test_raid.py 2015-09-24 16:22:12 +0000 |
3 | +++ src/maasserver/api/tests/test_raid.py 2015-10-29 13:27:52 +0000 |
4 | @@ -97,6 +97,8 @@ |
5 | node=node, group_type=FILESYSTEM_GROUP_TYPE.RAID_5), |
6 | factory.make_FilesystemGroup( |
7 | node=node, group_type=FILESYSTEM_GROUP_TYPE.RAID_6), |
8 | + factory.make_FilesystemGroup( |
9 | + node=node, group_type=FILESYSTEM_GROUP_TYPE.RAID_10), |
10 | ] |
11 | # Not RAID. Should not be in the output. |
12 | for _ in range(3): |
13 | @@ -419,7 +421,7 @@ |
14 | bds = [ |
15 | factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) |
16 | for i in range(10) |
17 | - ] |
18 | + ] |
19 | for bd in bds[5:]: |
20 | factory.make_PartitionTable(block_device=bd) |
21 | for bd in bds[5:]: |
22 | @@ -446,7 +448,7 @@ |
23 | |
24 | parsed_device = json.loads(response.content) |
25 | (parsed_block_devices, parsed_partitions, |
26 | - parsed_block_device_spares, parsed_partition_spares) = ( |
27 | + parsed_block_device_spares, parsed_partition_spares) = ( |
28 | get_devices_from_raid(parsed_device)) |
29 | # Size is equivalent to 6 devices of 9 TB each. |
30 | self.assertEqual( |
31 | @@ -457,6 +459,53 @@ |
32 | self.assertItemsEqual(spare_devices, parsed_block_device_spares) |
33 | self.assertItemsEqual(spare_partitions, parsed_partition_spares) |
34 | |
35 | + def test_create_raid_10(self): |
36 | + """Checks it's possible to create a RAID 10 using 4 raw |
37 | + devices, 4 partitions, one spare device and one spare partition.""" |
38 | + self.become_admin() |
39 | + node = factory.make_Node(status=NODE_STATUS.READY) |
40 | + # Add 10 10TB physical block devices to the node. |
41 | + bds = [ |
42 | + factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) |
43 | + for i in range(10) |
44 | + ] |
45 | + for bd in bds[5:]: |
46 | + factory.make_PartitionTable(block_device=bd) |
47 | + for bd in bds[5:]: |
48 | + bd.get_partitiontable().add_partition(size=1000 ** 4) |
49 | + large_partitions = [bd.get_partitiontable().add_partition() |
50 | + for bd in bds[5:]] |
51 | + uuid4 = unicode(uuid.uuid4()) |
52 | + uri = get_raid_devices_uri(node) |
53 | + block_devices = [bd.id for bd in bds[1:] |
54 | + if bd.get_partitiontable() is None] |
55 | + partitions = [lp.id for lp in large_partitions[1:]] |
56 | + spare_devices = [bds[0].id] |
57 | + spare_partitions = [large_partitions[0].id] |
58 | + response = self.client.post(uri, { |
59 | + 'name': 'md0', |
60 | + 'uuid': uuid4, |
61 | + 'level': FILESYSTEM_GROUP_TYPE.RAID_10, |
62 | + 'block_devices': block_devices, |
63 | + 'partitions': partitions, |
64 | + 'spare_devices': spare_devices, |
65 | + 'spare_partitions': spare_partitions, |
66 | + }) |
67 | + self.assertEqual(httplib.OK, response.status_code, response.content) |
68 | + |
69 | + parsed_device = json.loads(response.content) |
70 | + (parsed_block_devices, parsed_partitions, |
71 | + parsed_block_device_spares, parsed_partition_spares) = ( |
72 | + get_devices_from_raid(parsed_device)) |
73 | + # Size is equivalent to 4 devices of 9 TB each. |
74 | + self.assertEqual( |
75 | + 4 * ((9 * 1000 ** 4) - PARTITION_TABLE_EXTRA_SPACE), |
76 | + parsed_device['size']) |
77 | + self.assertItemsEqual(block_devices, parsed_block_devices) |
78 | + self.assertItemsEqual(partitions, parsed_partitions) |
79 | + self.assertItemsEqual(spare_devices, parsed_block_device_spares) |
80 | + self.assertItemsEqual(spare_partitions, parsed_partition_spares) |
81 | + |
82 | def test_create_raid_5_with_2_elements_fails(self): |
83 | self.become_admin() |
84 | node = factory.make_Node(status=NODE_STATUS.READY) |
85 | @@ -486,13 +535,35 @@ |
86 | bds = [ |
87 | factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) |
88 | for i in range(3) |
89 | + ] |
90 | + uuid4 = unicode(uuid.uuid4()) |
91 | + uri = get_raid_devices_uri(node) |
92 | + response = self.client.post(uri, { |
93 | + 'name': 'md0', |
94 | + 'uuid': uuid4, |
95 | + 'level': FILESYSTEM_GROUP_TYPE.RAID_6, |
96 | + 'block_devices': [bd.id for bd in bds], |
97 | + 'partitions': [], |
98 | + 'spare_devices': [], |
99 | + 'spare_partitions': [], |
100 | + }) |
101 | + self.assertEqual(httplib.BAD_REQUEST, response.status_code, |
102 | + response.content) |
103 | + |
104 | + def test_create_raid_10_with_2_elements_fails(self): |
105 | + self.become_admin() |
106 | + node = factory.make_Node(status=NODE_STATUS.READY) |
107 | + # Add 3 10TB physical block devices to the node. |
108 | + bds = [ |
109 | + factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) |
110 | + for i in range(2) |
111 | ] |
112 | uuid4 = unicode(uuid.uuid4()) |
113 | uri = get_raid_devices_uri(node) |
114 | response = self.client.post(uri, { |
115 | 'name': 'md0', |
116 | 'uuid': uuid4, |
117 | - 'level': FILESYSTEM_GROUP_TYPE.RAID_6, |
118 | + 'level': FILESYSTEM_GROUP_TYPE.RAID_10, |
119 | 'block_devices': [bd.id for bd in bds], |
120 | 'partitions': [], |
121 | 'spare_devices': [], |
122 | |
123 | === modified file 'src/maasserver/enum.py' |
124 | --- src/maasserver/enum.py 2015-10-04 20:47:36 +0000 |
125 | +++ src/maasserver/enum.py 2015-10-29 13:27:52 +0000 |
126 | @@ -463,6 +463,9 @@ |
127 | #: RAID level 6 |
128 | RAID_6 = 'raid-6' |
129 | |
130 | + #: RAID level 10 |
131 | + RAID_10 = 'raid-10' |
132 | + |
133 | #: Bcache |
134 | BCACHE = 'bcache' |
135 | |
136 | @@ -472,6 +475,7 @@ |
137 | FILESYSTEM_GROUP_TYPE.RAID_1, |
138 | FILESYSTEM_GROUP_TYPE.RAID_5, |
139 | FILESYSTEM_GROUP_TYPE.RAID_6, |
140 | + FILESYSTEM_GROUP_TYPE.RAID_10, |
141 | ] |
142 | |
143 | # Django choices for FILESYSTEM_GROUP_RAID_TYPES: sequence of tuples (key, UI |
144 | @@ -481,6 +485,7 @@ |
145 | (FILESYSTEM_GROUP_TYPE.RAID_1, "RAID 1"), |
146 | (FILESYSTEM_GROUP_TYPE.RAID_5, "RAID 5"), |
147 | (FILESYSTEM_GROUP_TYPE.RAID_6, "RAID 6"), |
148 | + (FILESYSTEM_GROUP_TYPE.RAID_10, "RAID 10"), |
149 | ) |
150 | |
151 | # Django choices for FILESYSTEM_GROUP_TYPE: sequence of tuples (key, UI |
152 | |
153 | === modified file 'src/maasserver/models/filesystemgroup.py' |
154 | --- src/maasserver/models/filesystemgroup.py 2015-10-09 04:35:47 +0000 |
155 | +++ src/maasserver/models/filesystemgroup.py 2015-10-29 13:27:52 +0000 |
156 | @@ -417,6 +417,8 @@ |
157 | return min_size * (num_raid - 1) |
158 | elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_6: |
159 | return min_size * (num_raid - 2) |
160 | + elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_10: |
161 | + return min_size * num_raid / 2 |
162 | raise ValidationError("Unknown raid type: %s" % self.group_type) |
163 | |
164 | def get_bcache_backing_filesystem(self): |
165 | @@ -583,6 +585,13 @@ |
166 | raise ValidationError( |
167 | "RAID level 6 must have at least 4 raid devices and " |
168 | "any number of spares.") |
169 | + elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_10: |
170 | + # RAID 10 must have at least 3 RAID filesystems, but can have |
171 | + # spares. |
172 | + if num_raid < 3: |
173 | + raise ValidationError( |
174 | + "RAID level 10 must have at least 3 raid devices and " |
175 | + "any number of spares.") |
176 | num_raid_invalid = len([ |
177 | fstype |
178 | for fstype in fstypes |
179 | |
180 | === modified file 'src/maasserver/models/tests/test_filesystemgroup.py' |
181 | --- src/maasserver/models/tests/test_filesystemgroup.py 2015-10-09 04:35:47 +0000 |
182 | +++ src/maasserver/models/tests/test_filesystemgroup.py 2015-10-29 13:27:52 +0000 |
183 | @@ -769,6 +769,36 @@ |
184 | self.assertEquals( |
185 | small_size * (number_of_raid_devices - 1), fsgroup.get_size()) |
186 | |
187 | + def test_get_size_returns_correct_disk_size_for_raid_10(self): |
188 | + node = factory.make_Node() |
189 | + small_size = random.randint( |
190 | + MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2) |
191 | + other_size = random.randint(small_size + 1, small_size + (10 ** 5)) |
192 | + number_of_raid_devices = random.randint(3, 9) |
193 | + filesystems = [ |
194 | + factory.make_Filesystem( |
195 | + fstype=FILESYSTEM_TYPE.RAID, |
196 | + block_device=factory.make_PhysicalBlockDevice( |
197 | + node=node, size=small_size)), |
198 | + ] |
199 | + for _ in range(number_of_raid_devices): |
200 | + filesystems.append( |
201 | + factory.make_Filesystem( |
202 | + fstype=FILESYSTEM_TYPE.RAID, |
203 | + block_device=factory.make_PhysicalBlockDevice( |
204 | + node=node, size=other_size))) |
205 | + # Spares are ignored and not taken into calculation. |
206 | + for _ in range(3): |
207 | + filesystems.append( |
208 | + factory.make_Filesystem( |
209 | + fstype=FILESYSTEM_TYPE.RAID_SPARE, |
210 | + block_device=factory.make_PhysicalBlockDevice( |
211 | + node=node, size=other_size))) |
212 | + fsgroup = factory.make_FilesystemGroup( |
213 | + group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems) |
214 | + self.assertEquals( |
215 | + small_size * (number_of_raid_devices + 1) / 2, fsgroup.get_size()) |
216 | + |
217 | def test_get_size_returns_0_if_bcache_without_backing(self): |
218 | fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE) |
219 | self.assertEquals(0, fsgroup.get_size()) |
220 | @@ -1071,7 +1101,7 @@ |
221 | fstype=FILESYSTEM_TYPE.RAID, |
222 | block_device=factory.make_PhysicalBlockDevice(node=node)) |
223 | for _ in range(random.randint(1, 3)) |
224 | - ] |
225 | + ] |
226 | with ExpectedException( |
227 | ValidationError, |
228 | re.escape( |
229 | @@ -1088,7 +1118,7 @@ |
230 | fstype=FILESYSTEM_TYPE.RAID, |
231 | block_device=factory.make_PhysicalBlockDevice(node=node)) |
232 | for _ in range(random.randint(4, 10)) |
233 | - ] |
234 | + ] |
235 | for _ in range(random.randint(1, 5)): |
236 | filesystems.append( |
237 | factory.make_Filesystem( |
238 | @@ -1099,6 +1129,59 @@ |
239 | group_type=FILESYSTEM_GROUP_TYPE.RAID_6, |
240 | filesystems=filesystems) |
241 | |
242 | + def test_cannot_save_raid_10_with_less_than_3_raid_devices(self): |
243 | + node = factory.make_Node() |
244 | + filesystems = [ |
245 | + factory.make_Filesystem( |
246 | + fstype=FILESYSTEM_TYPE.RAID, |
247 | + block_device=factory.make_PhysicalBlockDevice(node=node)) |
248 | + for _ in range(random.randint(1, 2)) |
249 | + ] |
250 | + with ExpectedException( |
251 | + ValidationError, |
252 | + re.escape( |
253 | + "{'__all__': [u'RAID level 10 must have at least 3 raid " |
254 | + "devices and any number of spares.']}")): |
255 | + factory.make_FilesystemGroup( |
256 | + group_type=FILESYSTEM_GROUP_TYPE.RAID_10, |
257 | + filesystems=filesystems) |
258 | + |
259 | + def test_can_save_raid_10_with_3_raid_devices_and_spares(self): |
260 | + node = factory.make_Node() |
261 | + filesystems = [ |
262 | + factory.make_Filesystem( |
263 | + fstype=FILESYSTEM_TYPE.RAID, |
264 | + block_device=factory.make_PhysicalBlockDevice(node=node)) |
265 | + for _ in range(3) |
266 | + ] |
267 | + for _ in range(random.randint(1, 5)): |
268 | + filesystems.append( |
269 | + factory.make_Filesystem( |
270 | + fstype=FILESYSTEM_TYPE.RAID_SPARE, |
271 | + block_device=factory.make_PhysicalBlockDevice(node=node))) |
272 | + # Test is that this does not raise an exception. |
273 | + factory.make_FilesystemGroup( |
274 | + group_type=FILESYSTEM_GROUP_TYPE.RAID_10, |
275 | + filesystems=filesystems) |
276 | + |
277 | + def test_can_save_raid_10_with_4_or_more_raid_devices_and_spares(self): |
278 | + node = factory.make_Node() |
279 | + filesystems = [ |
280 | + factory.make_Filesystem( |
281 | + fstype=FILESYSTEM_TYPE.RAID, |
282 | + block_device=factory.make_PhysicalBlockDevice(node=node)) |
283 | + for _ in range(random.randint(4, 10)) |
284 | + ] |
285 | + for _ in range(random.randint(1, 5)): |
286 | + filesystems.append( |
287 | + factory.make_Filesystem( |
288 | + fstype=FILESYSTEM_TYPE.RAID_SPARE, |
289 | + block_device=factory.make_PhysicalBlockDevice(node=node))) |
290 | + # Test is that this does not raise an exception. |
291 | + factory.make_FilesystemGroup( |
292 | + group_type=FILESYSTEM_GROUP_TYPE.RAID_10, |
293 | + filesystems=filesystems) |
294 | + |
295 | def test_cannot_save_bcache_without_cache_set(self): |
296 | node = factory.make_Node() |
297 | filesystems = [ |
298 | @@ -1319,6 +1402,10 @@ |
299 | (FILESYSTEM_GROUP_TYPE.RAID_6, { |
300 | "group_type": FILESYSTEM_GROUP_TYPE.RAID_6, |
301 | "name": "RAID", |
302 | + }), |
303 | + (FILESYSTEM_GROUP_TYPE.RAID_10, { |
304 | + "group_type": FILESYSTEM_GROUP_TYPE.RAID_10, |
305 | + "name": "RAID", |
306 | }), |
307 | (FILESYSTEM_GROUP_TYPE.BCACHE, { |
308 | "group_type": FILESYSTEM_GROUP_TYPE.BCACHE, |
309 | @@ -1355,6 +1442,10 @@ |
310 | (FILESYSTEM_GROUP_TYPE.RAID_6, { |
311 | "group_type": FILESYSTEM_GROUP_TYPE.RAID_6, |
312 | "prefix": "md", |
313 | + }), |
314 | + (FILESYSTEM_GROUP_TYPE.RAID_10, { |
315 | + "group_type": FILESYSTEM_GROUP_TYPE.RAID_10, |
316 | + "prefix": "md", |
317 | }), |
318 | (FILESYSTEM_GROUP_TYPE.BCACHE, { |
319 | "group_type": FILESYSTEM_GROUP_TYPE.BCACHE, |
320 | @@ -1391,6 +1482,10 @@ |
321 | (FILESYSTEM_GROUP_TYPE.RAID_6, { |
322 | "group_type": FILESYSTEM_GROUP_TYPE.RAID_6, |
323 | "block_size": 512, |
324 | + }), |
325 | + (FILESYSTEM_GROUP_TYPE.RAID_10, { |
326 | + "group_type": FILESYSTEM_GROUP_TYPE.RAID_10, |
327 | + "block_size": 512, |
328 | }), |
329 | # For BCACHE see |
330 | # `test_get_virtual_block_device_block_size_returns_backing_for_bc` |
331 | @@ -1695,7 +1790,7 @@ |
332 | block_devices = [ |
333 | factory.make_PhysicalBlockDevice(node=node) |
334 | for _ in range(3) |
335 | - ] |
336 | + ] |
337 | uuid = unicode(uuid4()) |
338 | with ExpectedException( |
339 | ValidationError, |
340 | @@ -1711,6 +1806,27 @@ |
341 | spare_devices=[], |
342 | spare_partitions=[]) |
343 | |
344 | + def test_create_raid_10_with_2_elements_fails(self): |
345 | + node = factory.make_Node() |
346 | + block_devices = [ |
347 | + factory.make_PhysicalBlockDevice(node=node) |
348 | + for _ in range(2) |
349 | + ] |
350 | + uuid = unicode(uuid4()) |
351 | + with ExpectedException( |
352 | + ValidationError, |
353 | + re.escape( |
354 | + "{'__all__': [u'RAID level 10 must have at least 3 raid " |
355 | + "devices and any number of spares.']}")): |
356 | + RAID.objects.create_raid( |
357 | + name='md0', |
358 | + level=FILESYSTEM_GROUP_TYPE.RAID_10, |
359 | + uuid=uuid, |
360 | + block_devices=block_devices, |
361 | + partitions=[], |
362 | + spare_devices=[], |
363 | + spare_partitions=[]) |
364 | + |
365 | def test_create_raid_with_block_device_from_other_node_fails(self): |
366 | node1 = factory.make_Node() |
367 | node2 = factory.make_Node() |
368 | |
369 | === modified file 'src/maasserver/preseed_storage.py' |
370 | --- src/maasserver/preseed_storage.py 2015-10-27 23:20:28 +0000 |
371 | +++ src/maasserver/preseed_storage.py 2015-10-29 13:27:52 +0000 |
372 | @@ -346,6 +346,7 @@ |
373 | FILESYSTEM_GROUP_TYPE.RAID_1: 1, |
374 | FILESYSTEM_GROUP_TYPE.RAID_5: 5, |
375 | FILESYSTEM_GROUP_TYPE.RAID_6: 6, |
376 | + FILESYSTEM_GROUP_TYPE.RAID_10: 10, |
377 | } |
378 | return raid_levels[filesystem_group.group_type] |
379 | |
380 | |
381 | === modified file 'src/maasserver/static/js/angular/controllers/node_details_storage.js' |
382 | --- src/maasserver/static/js/angular/controllers/node_details_storage.js 2015-10-23 16:34:24 +0000 |
383 | +++ src/maasserver/static/js/angular/controllers/node_details_storage.js 2015-10-29 13:27:52 +0000 |
384 | @@ -102,6 +102,15 @@ |
385 | calculateSize: function(minSize, numDisks) { |
386 | return minSize * (numDisks - 2); |
387 | } |
388 | + }, |
389 | + { |
390 | + level: "raid-10", |
391 | + title: "RAID 10", |
392 | + min_disks: 3, |
393 | + allows_spares: true, |
394 | + calculateSize: function(minSize, numDisks) { |
395 | + return minSize * numDisks / 2; |
396 | + } |
397 | } |
398 | ]; |
399 | |
400 | |
401 | === modified file 'src/maasserver/static/js/angular/controllers/tests/test_node_details_storage.js' |
402 | --- src/maasserver/static/js/angular/controllers/tests/test_node_details_storage.js 2015-10-23 18:39:01 +0000 |
403 | +++ src/maasserver/static/js/angular/controllers/tests/test_node_details_storage.js 2015-10-29 13:27:52 +0000 |
404 | @@ -3024,9 +3024,10 @@ |
405 | var modes = $scope.getAvailableRAIDModes(); |
406 | expect(modes[0].level).toEqual("raid-0"); |
407 | expect(modes[1].level).toEqual("raid-1"); |
408 | + expect(modes.length).toEqual(2); |
409 | }); |
410 | |
411 | - it("returns raid 0,1,5 for 3 disks", function() { |
412 | + it("returns raid 0,1,5,10 for 3 disks", function() { |
413 | var controller = makeController(); |
414 | $scope.availableNew.devices = [{}, {}, {}]; |
415 | |
416 | @@ -3034,9 +3035,11 @@ |
417 | expect(modes[0].level).toEqual("raid-0"); |
418 | expect(modes[1].level).toEqual("raid-1"); |
419 | expect(modes[2].level).toEqual("raid-5"); |
420 | + expect(modes[3].level).toEqual("raid-10"); |
421 | + expect(modes.length).toEqual(4); |
422 | }); |
423 | |
424 | - it("returns raid 0,1,5,6 for 4 disks", function() { |
425 | + it("returns raid 0,1,5,6,10 for 4 disks", function() { |
426 | var controller = makeController(); |
427 | $scope.availableNew.devices = [{}, {}, {}, {}]; |
428 | |
429 | @@ -3045,6 +3048,8 @@ |
430 | expect(modes[1].level).toEqual("raid-1"); |
431 | expect(modes[2].level).toEqual("raid-5"); |
432 | expect(modes[3].level).toEqual("raid-6"); |
433 | + expect(modes[4].level).toEqual("raid-10"); |
434 | + expect(modes.length).toEqual(5); |
435 | }); |
436 | }); |
437 | |
438 | @@ -3070,6 +3075,11 @@ |
439 | level: "raid-6", |
440 | min_disks: 4, |
441 | allows_spares: true |
442 | + }, |
443 | + { |
444 | + level: "raid-10", |
445 | + min_disks: 3, |
446 | + allows_spares: true |
447 | } |
448 | ]; |
449 | |
450 | @@ -3323,6 +3333,7 @@ |
451 | $scope.availableNew.mode = $scope.getAvailableRAIDModes()[2]; |
452 | $scope.setAsSpareRAIDMember(spare0); |
453 | |
454 | + // The 1MB spare causes us to only use 1MB of each active disk. |
455 | expect($scope.getNewRAIDSize()).toBe("2.0 MB"); |
456 | }); |
457 | |
458 | @@ -3358,8 +3369,40 @@ |
459 | $scope.availableNew.mode = $scope.getAvailableRAIDModes()[3]; |
460 | $scope.setAsSpareRAIDMember(spare0); |
461 | |
462 | + // The 1MB spare causes us to only use 1MB of each active disk. |
463 | expect($scope.getNewRAIDSize()).toBe("2.0 MB"); |
464 | }); |
465 | + |
466 | + it("gets proper raid-10 size", function() { |
467 | + var controller = makeController(); |
468 | + var disk0 = { |
469 | + original: { |
470 | + available_size: 2 * 1000 * 1000 |
471 | + } |
472 | + }; |
473 | + var disk1 = { |
474 | + original: { |
475 | + available_size: 2 * 1000 * 1000 |
476 | + } |
477 | + }; |
478 | + var disk2 = { |
479 | + original: { |
480 | + available_size: 2 * 1000 * 1000 |
481 | + } |
482 | + }; |
483 | + var spare0 = { |
484 | + original: { |
485 | + available_size: 1000 * 1000 |
486 | + } |
487 | + }; |
488 | + $scope.availableNew.spares = []; |
489 | + $scope.availableNew.devices = [disk0, disk1, disk2, spare0]; |
490 | + $scope.availableNew.mode = $scope.getAvailableRAIDModes()[4]; |
491 | + $scope.setAsSpareRAIDMember(spare0); |
492 | + |
493 | + // The 1MB spare causes us to only use 1MB of each active disk. |
494 | + expect($scope.getNewRAIDSize()).toBe("1.5 MB"); |
495 | + }); |
496 | }); |
497 | |
498 | describe("createRAIDCanSave", function() { |
499 | |
500 | === modified file 'src/maasserver/testing/factory.py' |
501 | --- src/maasserver/testing/factory.py 2015-10-28 22:03:20 +0000 |
502 | +++ src/maasserver/testing/factory.py 2015-10-29 13:27:52 +0000 |
503 | @@ -1395,6 +1395,18 @@ |
504 | fstype=FILESYSTEM_TYPE.RAID_SPARE, |
505 | block_device=spare_block_device) |
506 | group.filesystems.add(spare_filesystem) |
507 | + elif group_type == FILESYSTEM_GROUP_TYPE.RAID_10: |
508 | + for _ in range(4): |
509 | + block_device = self.make_PhysicalBlockDevice(node) |
510 | + filesystem = self.make_Filesystem( |
511 | + fstype=FILESYSTEM_TYPE.RAID, |
512 | + block_device=block_device) |
513 | + group.filesystems.add(filesystem) |
514 | + spare_block_device = self.make_PhysicalBlockDevice(node) |
515 | + spare_filesystem = self.make_Filesystem( |
516 | + fstype=FILESYSTEM_TYPE.RAID_SPARE, |
517 | + block_device=spare_block_device) |
518 | + group.filesystems.add(spare_filesystem) |
519 | elif group_type == FILESYSTEM_GROUP_TYPE.BCACHE: |
520 | backing_block_device = self.make_PhysicalBlockDevice(node) |
521 | backing_filesystem = self.make_Filesystem( |
522 | |
523 | === modified file 'src/maasserver/tests/test_forms_raid.py' |
524 | --- src/maasserver/tests/test_forms_raid.py 2015-10-28 01:59:30 +0000 |
525 | +++ src/maasserver/tests/test_forms_raid.py 2015-10-29 13:27:52 +0000 |
526 | @@ -261,6 +261,7 @@ |
527 | FILESYSTEM_GROUP_TYPE.RAID_1, |
528 | FILESYSTEM_GROUP_TYPE.RAID_5, |
529 | FILESYSTEM_GROUP_TYPE.RAID_6, |
530 | + FILESYSTEM_GROUP_TYPE.RAID_10, |
531 | ]: |
532 | form = CreateRaidForm(node=node, data={ |
533 | 'name': 'md1', |
At quick glance looks to be missing unit tests for the JS.