Merge lp:~lamont/maas/bug-1647703 into lp:maas/2.1
- bug-1647703
- Merge into 2.1
Proposed by
LaMont Jones
Status: Superseded
Proposed branch: | lp:~lamont/maas/bug-1647703 |
Merge into: | lp:maas/2.1 |
Diff against target: |
17272 lines (+7220/-4756) (has conflicts) 183 files modified
HACKING.txt (+9/-5) Makefile (+2/-1) buildout.cfg (+2/-3) docs/_templates/maas/static/css/main.css (+14/-0) docs/conf.py (+2/-1) docs/troubleshooting.rst (+1/-1) media/README (+4/-4) required-packages/dev (+2/-1) services/reloader/run (+1/-1) src/maascli/cli.py (+2/-2) src/maasserver/__init__.py (+1/-1) src/maasserver/api/chassis.py (+78/-0) src/maasserver/api/doc.py (+5/-6) src/maasserver/api/doc_handler.py (+10/-10) src/maasserver/api/interfaces.py (+6/-6) src/maasserver/api/nodes.py (+7/-4) src/maasserver/api/results.py (+2/-3) src/maasserver/api/storage.py (+76/-0) src/maasserver/api/subnets.py (+149/-64) src/maasserver/api/tags.py (+1/-2) src/maasserver/api/tests/test_chassis.py (+127/-0) src/maasserver/api/tests/test_doc.py (+12/-14) src/maasserver/api/tests/test_nodes.py (+0/-1) src/maasserver/api/tests/test_storage.py (+125/-0) src/maasserver/api/tests/test_subnets.py (+48/-4) src/maasserver/api/tests/test_vlans.py (+40/-0) src/maasserver/api/vlans.py (+8/-1) src/maasserver/bootresources.py (+43/-17) src/maasserver/clusterrpc/power_parameters.py (+8/-10) src/maasserver/clusterrpc/testing/power_parameters.py (+4/-2) src/maasserver/clusterrpc/tests/test_power_parameters.py (+13/-13) src/maasserver/dhcp.py (+45/-64) src/maasserver/djangosettings/demo.py (+1/-1) src/maasserver/djangosettings/development.py (+1/-1) src/maasserver/djangosettings/settings.py (+3/-3) src/maasserver/djangosettings/tests/test_settings.py (+2/-2) src/maasserver/enum.py (+4/-0) src/maasserver/exceptions.py (+0/-4) src/maasserver/forms_commission.py (+0/-5) src/maasserver/forms_subnet.py (+8/-1) src/maasserver/forms_vlan.py (+25/-0) src/maasserver/locks.py (+3/-3) src/maasserver/management/commands/dbupgrade.py (+46/-3) src/maasserver/management/commands/tests/test_dbupgrade.py (+4/-1) src/maasserver/migrations/builtin/maasserver/0016_migrate_power_data_node_to_bmc.py (+9/-7) src/maasserver/migrations/builtin/maasserver/0022_extract_ip_for_bmcs.py (+6/-3) 
src/maasserver/migrations/builtin/maasserver/0027_replace_static_range_with_admin_reserved_ranges.py (+1/-1) src/maasserver/migrations/builtin/maasserver/0056_add_description_to_fabric_and_space.py (+1/-1) src/maasserver/migrations/builtin/maasserver/0094_add_unmanaged_subnets.py (+22/-0) src/maasserver/migrations/builtin/maasserver/0095_vlan_relay_vlan.py (+23/-0) src/maasserver/migrations/builtin/maasserver/0096_set_default_vlan_field.py (+24/-0) src/maasserver/migrations/builtin/maasserver/0097_node_chassis_storage_hints.py (+73/-0) src/maasserver/models/__init__.py (+6/-0) src/maasserver/models/bmc.py (+17/-11) src/maasserver/models/chassishints.py (+33/-0) src/maasserver/models/event.py (+8/-1) src/maasserver/models/node.py (+177/-56) src/maasserver/models/signals/nodes.py (+23/-1) src/maasserver/models/signals/tests/test_nodes.py (+25/-0) src/maasserver/models/staticipaddress.py (+207/-28) src/maasserver/models/subnet.py (+69/-7) src/maasserver/models/tests/test_discovery.py (+2/-1) src/maasserver/models/tests/test_event.py (+10/-0) src/maasserver/models/tests/test_neighbour.py (+2/-2) src/maasserver/models/tests/test_node.py (+214/-96) src/maasserver/models/tests/test_staticipaddress.py (+136/-106) src/maasserver/models/tests/test_subnet.py (+85/-11) src/maasserver/models/tests/test_vlan.py (+8/-0) src/maasserver/models/vlan.py (+5/-0) src/maasserver/node_action.py (+0/-4) src/maasserver/rpc/nodes.py (+8/-7) src/maasserver/rpc/rackcontrollers.py (+1/-2) src/maasserver/rpc/regionservice.py (+16/-19) src/maasserver/rpc/tests/test_nodes.py (+2/-2) src/maasserver/rpc/tests/test_regionservice.py (+4/-1) src/maasserver/static/js/angular/controllers/node_details.js (+7/-2) src/maasserver/static/js/angular/controllers/node_events.js (+14/-7) src/maasserver/static/js/angular/controllers/node_result.js (+14/-6) src/maasserver/static/js/angular/controllers/subnet_details.js (+3/-1) src/maasserver/static/js/angular/controllers/tests/test_node_details.js (+33/-0) 
src/maasserver/static/js/angular/controllers/tests/test_node_events.js (+19/-2) src/maasserver/static/js/angular/controllers/tests/test_node_result.js (+17/-2) src/maasserver/static/js/angular/controllers/tests/test_subnet_details.js (+3/-1) src/maasserver/static/js/angular/controllers/tests/test_vlan_details.js (+56/-3) src/maasserver/static/js/angular/controllers/vlan_details.js (+126/-18) src/maasserver/static/js/angular/factories/tests/test_vlans.js (+6/-3) src/maasserver/static/js/angular/factories/vlans.js (+12/-7) src/maasserver/static/js/angular/maas.js (+10/-0) src/maasserver/static/partials/domain-details.html (+4/-1) src/maasserver/static/partials/node-details.html (+37/-35) src/maasserver/static/partials/node-events.html (+3/-3) src/maasserver/static/partials/node-result.html (+1/-1) src/maasserver/static/partials/nodes-list.html (+3/-3) src/maasserver/static/partials/subnet-details.html (+7/-2) src/maasserver/static/partials/vlan-details.html (+105/-20) src/maasserver/testing/factory.py (+37/-22) src/maasserver/tests/test_bootresources.py (+7/-0) src/maasserver/tests/test_commands.py (+4/-1) src/maasserver/tests/test_dhcp.py (+54/-81) src/maasserver/tests/test_forms_commission.py (+10/-7) src/maasserver/tests/test_forms_vlan.py (+70/-0) src/maasserver/tests/test_node_action.py (+22/-11) src/maasserver/triggers/system.py (+56/-0) src/maasserver/triggers/tests/test_system_listener.py (+114/-0) src/maasserver/triggers/tests/test_websocket_listener.py (+21/-0) src/maasserver/triggers/websocket.py (+42/-0) src/maasserver/urls_api.py (+20/-0) src/maasserver/utils/orm.py (+102/-20) src/maasserver/utils/tests/test_mac.py (+3/-3) src/maasserver/utils/tests/test_orm.py (+204/-1) src/maasserver/websockets/handlers/controller.py (+5/-2) src/maasserver/websockets/handlers/device.py (+2/-0) src/maasserver/websockets/handlers/machine.py (+1/-0) src/maasserver/websockets/handlers/node.py (+2/-3) src/maasserver/websockets/handlers/tests/test_controller.py (+6/-0) 
src/maasserver/websockets/handlers/tests/test_machine.py (+1/-0) src/maasserver/websockets/handlers/tests/test_subnet.py (+1/-0) src/maasserver/websockets/handlers/tests/test_vlan.py (+15/-0) src/maasserver/websockets/handlers/vlan.py (+13/-6) src/maastesting/matchers.py (+54/-0) src/maastesting/tests/test_matchers.py (+89/-0) src/provisioningserver/boot/__init__.py (+11/-3) src/provisioningserver/boot/pxe.py (+7/-1) src/provisioningserver/boot/tests/test_boot.py (+6/-0) src/provisioningserver/boot/uefi_amd64.py (+7/-1) src/provisioningserver/dhcp/tests/test_config.py (+31/-102) src/provisioningserver/diskless.py (+0/-237) src/provisioningserver/drivers/__init__.py (+78/-104) src/provisioningserver/drivers/chassis/__init__.py (+282/-0) src/provisioningserver/drivers/chassis/tests/test_base.py (+585/-0) src/provisioningserver/drivers/diskless/__init__.py (+0/-102) src/provisioningserver/drivers/diskless/tests/test_base.py (+0/-163) src/provisioningserver/drivers/hardware/tests/test_virsh.py (+14/-0) src/provisioningserver/drivers/hardware/virsh.py (+3/-1) src/provisioningserver/drivers/power/__init__.py (+85/-20) src/provisioningserver/drivers/power/amt.py (+11/-2) src/provisioningserver/drivers/power/apc.py (+17/-2) src/provisioningserver/drivers/power/dli.py (+17/-2) src/provisioningserver/drivers/power/fence_cdu.py (+18/-2) src/provisioningserver/drivers/power/hmc.py (+19/-2) src/provisioningserver/drivers/power/ipmi.py (+31/-2) src/provisioningserver/drivers/power/manual.py (+3/-1) src/provisioningserver/drivers/power/moonshot.py (+16/-2) src/provisioningserver/drivers/power/mscm.py (+19/-2) src/provisioningserver/drivers/power/msftocs.py (+17/-2) src/provisioningserver/drivers/power/nova.py (+21/-2) src/provisioningserver/drivers/power/seamicro.py (+27/-2) src/provisioningserver/drivers/power/tests/test_base.py (+17/-3) src/provisioningserver/drivers/power/ucsm.py (+18/-2) src/provisioningserver/drivers/power/virsh.py (+18/-2) 
src/provisioningserver/drivers/power/vmware.py (+25/-2) src/provisioningserver/drivers/power/wedge.py (+13/-2) src/provisioningserver/drivers/tests/test_base.py (+151/-61) src/provisioningserver/events.py (+70/-5) src/provisioningserver/import_images/boot_resources.py (+43/-16) src/provisioningserver/import_images/tests/test_boot_resources.py (+15/-6) src/provisioningserver/power/change.py (+0/-269) src/provisioningserver/power/poweraction.py (+0/-136) src/provisioningserver/power/query.py (+0/-206) src/provisioningserver/power/schema.py (+0/-476) src/provisioningserver/power/tests/test_change.py (+0/-563) src/provisioningserver/power/tests/test_query.py (+0/-557) src/provisioningserver/rackdservices/node_power_monitor_service.py (+1/-1) src/provisioningserver/rackdservices/tests/test_tftp.py (+3/-3) src/provisioningserver/rackdservices/tftp.py (+2/-2) src/provisioningserver/rpc/arguments.py (+17/-0) src/provisioningserver/rpc/chassis.py (+67/-0) src/provisioningserver/rpc/cluster.py (+27/-0) src/provisioningserver/rpc/clusterservice.py (+20/-9) src/provisioningserver/rpc/exceptions.py (+8/-0) src/provisioningserver/rpc/power.py (+432/-31) src/provisioningserver/rpc/tests/test_arguments.py (+22/-0) src/provisioningserver/rpc/tests/test_chassis.py (+124/-0) src/provisioningserver/rpc/tests/test_clusterservice.py (+63/-17) src/provisioningserver/rpc/tests/test_power.py (+1109/-145) src/provisioningserver/templates/dns/zone.template (+1/-1) src/provisioningserver/testing/network.py (+0/-40) src/provisioningserver/tests/test_diskless.py (+0/-493) src/provisioningserver/tests/test_events.py (+21/-6) src/provisioningserver/utils/network.py (+4/-3) src/provisioningserver/utils/tests/test_network.py (+8/-5) utilities/check-imports (+0/-27) utilities/remote-reinstall (+0/-4) Contents conflict in src/maasserver/migrations/south/django16_south_maas19.tar.gz Text conflict in src/maasserver/models/staticipaddress.py Text conflict in 
src/maasserver/models/tests/test_staticipaddress.py Text conflict in src/maasserver/static/partials/vlan-details.html |
To merge this branch: | bzr merge lp:~lamont/maas/bug-1647703 |
Related bugs: none
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
MAAS Committers | Pending | ||
Review via email: mp+312686@code.launchpad.net |
This proposal has been superseded by a proposal from 2016-12-07.
Commit message
Update the websocket node when the domain name changes.
Description of the change
Update the websocket node when the domain name changes.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'HACKING.txt' | |||
2 | --- HACKING.txt 2016-03-28 13:54:47 +0000 | |||
3 | +++ HACKING.txt 2016-12-07 15:50:52 +0000 | |||
4 | @@ -138,8 +138,9 @@ | |||
5 | 138 | regiond.log. To enable logging of all exceptions even exceptions where MAAS | 138 | regiond.log. To enable logging of all exceptions even exceptions where MAAS |
6 | 139 | will return the correct HTTP status code.:: | 139 | will return the correct HTTP status code.:: |
7 | 140 | 140 | ||
10 | 141 | $ sudo sed -i 's/DEBUG = False/DEBUG = True/g' /usr/share/maas/maas/settings.py | 141 | $ sudo sed -i 's/DEBUG = False/DEBUG = True/g' \ |
11 | 142 | $ sudo service maas-regiond restart | 142 | > /usr/lib/python3/dist-packages/maasserver/djangosettings/settings.py |
12 | 143 | $ sudo service maas-regiond restart | ||
13 | 143 | 144 | ||
14 | 144 | Run regiond in foreground | 145 | Run regiond in foreground |
15 | 145 | ^^^^^^^^^^^^^^^^^^^^^^^^^ | 146 | ^^^^^^^^^^^^^^^^^^^^^^^^^ |
16 | @@ -149,8 +150,10 @@ | |||
17 | 149 | placed a breakpoint into the code you want to inspect you can start the regiond | 150 | placed a breakpoint into the code you want to inspect you can start the regiond |
18 | 150 | process in the foreground.:: | 151 | process in the foreground.:: |
19 | 151 | 152 | ||
22 | 152 | $ sudo service maas-regiond stop | 153 | $ sudo service maas-regiond stop |
23 | 153 | $ sudo -u maas -H DJANGO_SETTINGS_MODULE=maas.settings PYTHONPATH=/usr/share/maas twistd3 --nodaemon --pidfile= maas-regiond | 154 | $ sudo -u maas -H \ |
24 | 155 | > DJANGO_SETTINGS_MODULE=maasserver.djangosettings.settings \ | ||
25 | 156 | > twistd3 --nodaemon --pidfile= maas-regiond | ||
26 | 154 | 157 | ||
27 | 155 | 158 | ||
28 | 156 | .. Note:: | 159 | .. Note:: |
29 | @@ -175,7 +178,8 @@ | |||
30 | 175 | Development MAAS server setup | 178 | Development MAAS server setup |
31 | 176 | ============================= | 179 | ============================= |
32 | 177 | 180 | ||
34 | 178 | Access to the database is configured in ``src/maas/development.py``. | 181 | Access to the database is configured in |
35 | 182 | ``src/maasserver/djangosettings/development.py``. | ||
36 | 179 | 183 | ||
37 | 180 | The ``Makefile`` or the test suite sets up a development database | 184 | The ``Makefile`` or the test suite sets up a development database |
38 | 181 | cluster inside your branch. It lives in the ``db`` directory, which | 185 | cluster inside your branch. It lives in the ``db`` directory, which |
39 | 182 | 186 | ||
40 | === modified file 'Makefile' | |||
41 | --- Makefile 2016-10-17 06:38:56 +0000 | |||
42 | +++ Makefile 2016-12-07 15:50:52 +0000 | |||
43 | @@ -427,7 +427,8 @@ | |||
44 | 427 | $(warning 'distclean' is deprecated; use 'clean') | 427 | $(warning 'distclean' is deprecated; use 'clean') |
45 | 428 | 428 | ||
46 | 429 | harness: bin/maas-region bin/database | 429 | harness: bin/maas-region bin/database |
48 | 430 | $(dbrun) bin/maas-region shell --settings=maas.demo | 430 | $(dbrun) bin/maas-region shell \ |
49 | 431 | --settings=maasserver.djangosettings.demo | ||
50 | 431 | 432 | ||
51 | 432 | dbharness: bin/database | 433 | dbharness: bin/database |
52 | 433 | bin/database --preserve shell | 434 | bin/database --preserve shell |
53 | 434 | 435 | ||
54 | === modified file 'buildout.cfg' | |||
55 | --- buildout.cfg 2016-10-12 15:26:17 +0000 | |||
56 | +++ buildout.cfg 2016-12-07 15:50:52 +0000 | |||
57 | @@ -104,7 +104,7 @@ | |||
58 | 104 | twistd.region=twisted.scripts.twistd:run | 104 | twistd.region=twisted.scripts.twistd:run |
59 | 105 | initialization = | 105 | initialization = |
60 | 106 | ${common:initialization} | 106 | ${common:initialization} |
62 | 107 | environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.development") | 107 | environ.setdefault("DJANGO_SETTINGS_MODULE", "maasserver.djangosettings.development") |
63 | 108 | scripts = | 108 | scripts = |
64 | 109 | maas-region | 109 | maas-region |
65 | 110 | twistd.region | 110 | twistd.region |
66 | @@ -129,7 +129,6 @@ | |||
67 | 129 | # "--with-resources", | 129 | # "--with-resources", |
68 | 130 | "--with-scenarios", | 130 | "--with-scenarios", |
69 | 131 | "--with-select", | 131 | "--with-select", |
70 | 132 | "--select-dir=src/maas", | ||
71 | 133 | "--select-dir=src/maasserver", | 132 | "--select-dir=src/maasserver", |
72 | 134 | "--select-dir=src/metadataserver", | 133 | "--select-dir=src/metadataserver", |
73 | 135 | "--cover-package=maas,maasserver,metadataserver", | 134 | "--cover-package=maas,maasserver,metadataserver", |
74 | @@ -294,7 +293,7 @@ | |||
75 | 294 | from os import environ | 293 | from os import environ |
76 | 295 | environ.setdefault("MAAS_RACK_DEVELOP", "TRUE") | 294 | environ.setdefault("MAAS_RACK_DEVELOP", "TRUE") |
77 | 296 | environ.setdefault("MAAS_ROOT", "${buildout:directory}/run-e2e") | 295 | environ.setdefault("MAAS_ROOT", "${buildout:directory}/run-e2e") |
79 | 297 | environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.development") | 296 | environ.setdefault("DJANGO_SETTINGS_MODULE", "maasserver.djangosettings.development") |
80 | 298 | environ.setdefault("DEV_DB_NAME", "test_maas_e2e") | 297 | environ.setdefault("DEV_DB_NAME", "test_maas_e2e") |
81 | 299 | environ.setdefault("MAAS_PREVENT_MIGRATIONS", "1") | 298 | environ.setdefault("MAAS_PREVENT_MIGRATIONS", "1") |
82 | 300 | 299 | ||
83 | 301 | 300 | ||
84 | === modified file 'docs/_templates/maas/static/css/main.css' | |||
85 | --- docs/_templates/maas/static/css/main.css 2014-06-09 16:25:19 +0000 | |||
86 | +++ docs/_templates/maas/static/css/main.css 2016-12-07 15:50:52 +0000 | |||
87 | @@ -73,3 +73,17 @@ | |||
88 | 73 | text-decoration: none; | 73 | text-decoration: none; |
89 | 74 | border-bottom: 1px solid #6D4100; | 74 | border-bottom: 1px solid #6D4100; |
90 | 75 | } | 75 | } |
91 | 76 | |||
92 | 77 | /* | ||
93 | 78 | * Custom CSS selectors for the API documentation page. | ||
94 | 79 | * | ||
95 | 80 | * Make subtitles for each API endpoint smaller, so they don't overwhelm | ||
96 | 81 | * the remainder of the documentation. | ||
97 | 82 | */ | ||
98 | 83 | div#maas-api div#operations h4 code.docutils { | ||
99 | 84 | font-size: 75%; | ||
100 | 85 | } | ||
101 | 86 | |||
102 | 87 | div#maas-api div#operations div.section h5 { | ||
103 | 88 | font-size: 90%; | ||
104 | 89 | } | ||
105 | 76 | 90 | ||
106 | === modified file 'docs/conf.py' | |||
107 | --- docs/conf.py 2016-03-28 13:54:47 +0000 | |||
108 | +++ docs/conf.py 2016-12-07 15:50:52 +0000 | |||
109 | @@ -24,7 +24,8 @@ | |||
110 | 24 | from pytz import UTC | 24 | from pytz import UTC |
111 | 25 | 25 | ||
112 | 26 | # Configure MAAS's settings. | 26 | # Configure MAAS's settings. |
114 | 27 | environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.settings") | 27 | environ.setdefault( |
115 | 28 | "DJANGO_SETTINGS_MODULE", "maasserver.djangosettings.settings") | ||
116 | 28 | 29 | ||
117 | 29 | # If extensions (or modules to document with autodoc) are in another directory, | 30 | # If extensions (or modules to document with autodoc) are in another directory, |
118 | 30 | # add these directories to sys.path here. If the directory is relative to the | 31 | # add these directories to sys.path here. If the directory is relative to the |
119 | 31 | 32 | ||
120 | === modified file 'docs/troubleshooting.rst' | |||
121 | --- docs/troubleshooting.rst 2014-09-10 16:20:31 +0000 | |||
122 | +++ docs/troubleshooting.rst 2016-12-07 15:50:52 +0000 | |||
123 | @@ -111,7 +111,7 @@ | |||
124 | 111 | always point at the local server. | 111 | always point at the local server. |
125 | 112 | #. If you are still getting "404 - Page not found" errors, check that the MAAS | 112 | #. If you are still getting "404 - Page not found" errors, check that the MAAS |
126 | 113 | web interface has been installed in the right place. There should be a file | 113 | web interface has been installed in the right place. There should be a file |
128 | 114 | present called /usr/share/maas/maas/urls.py | 114 | called ``urls.py`` in ``/usr/lib/python3/dist-packages/maasserver/djangosettings/``. |
129 | 115 | 115 | ||
130 | 116 | Debugging ephemeral image | 116 | Debugging ephemeral image |
131 | 117 | ========================= | 117 | ========================= |
132 | 118 | 118 | ||
133 | === modified file 'media/README' | |||
134 | --- media/README 2012-03-11 21:13:22 +0000 | |||
135 | +++ media/README 2016-12-07 15:50:52 +0000 | |||
136 | @@ -1,5 +1,5 @@ | |||
137 | 1 | This folder contains somewhat ephemeral things: subfolders serve as | 1 | This folder contains somewhat ephemeral things: subfolders serve as |
142 | 2 | MEDIA_ROOT for maas.demo and maas.development environments. The | 2 | MEDIA_ROOT for maasserver.djangosettings.demo and .development |
143 | 3 | media/demo directory should always exist and not be deleted, though | 3 | environments. The media/demo directory should always exist and not be |
144 | 4 | its contents can be. The media/development directory should be created | 4 | deleted, though its contents can be. The media/development directory |
145 | 5 | and destroyed by tests, as needed. | 5 | should be created and destroyed by tests, as needed. |
146 | 6 | 6 | ||
147 | === modified file 'required-packages/dev' | |||
148 | --- required-packages/dev 2016-08-24 20:20:55 +0000 | |||
149 | +++ required-packages/dev 2016-12-07 15:50:52 +0000 | |||
150 | @@ -13,10 +13,10 @@ | |||
151 | 13 | libjs-jquery | 13 | libjs-jquery |
152 | 14 | libjs-jquery-hotkeys | 14 | libjs-jquery-hotkeys |
153 | 15 | libjs-yui3-full | 15 | libjs-yui3-full |
154 | 16 | libnss-wrapper | ||
155 | 16 | make | 17 | make |
156 | 17 | nodejs-legacy | 18 | nodejs-legacy |
157 | 18 | npm | 19 | npm |
158 | 19 | python-pocket-lint | ||
159 | 20 | python-bson | 20 | python-bson |
160 | 21 | python-crochet | 21 | python-crochet |
161 | 22 | python-django | 22 | python-django |
162 | @@ -26,6 +26,7 @@ | |||
163 | 26 | python-lxml | 26 | python-lxml |
164 | 27 | python-netaddr | 27 | python-netaddr |
165 | 28 | python-netifaces | 28 | python-netifaces |
166 | 29 | python-pocket-lint | ||
167 | 29 | python-psycopg2 | 30 | python-psycopg2 |
168 | 30 | python-simplejson | 31 | python-simplejson |
169 | 31 | python-tempita | 32 | python-tempita |
170 | 32 | 33 | ||
171 | === modified file 'services/reloader/run' | |||
172 | --- services/reloader/run 2016-05-11 19:01:48 +0000 | |||
173 | +++ services/reloader/run 2016-12-07 15:50:52 +0000 | |||
174 | @@ -128,7 +128,7 @@ | |||
175 | 128 | exclude_filter=lambda path: ( | 128 | exclude_filter=lambda path: ( |
176 | 129 | "/test/" in path or "/testing/" in path or "/." in path)) | 129 | "/test/" in path or "/testing/" in path or "/." in path)) |
177 | 130 | wm.add_watch( | 130 | wm.add_watch( |
179 | 131 | ["src/maas*", "src/meta*"], TRIGGER_EVENTS, | 131 | ["src/maasserver", "src/metadataserver"], TRIGGER_EVENTS, |
180 | 132 | proc_fun=handle_maas_change, rec=True, auto_add=True, do_glob=True) | 132 | proc_fun=handle_maas_change, rec=True, auto_add=True, do_glob=True) |
181 | 133 | wm.add_watch( | 133 | wm.add_watch( |
182 | 134 | ["src/prov*"], TRIGGER_EVENTS, proc_fun=handle_pserv_change, | 134 | ["src/prov*"], TRIGGER_EVENTS, proc_fun=handle_pserv_change, |
183 | 135 | 135 | ||
184 | === modified file 'src/maascli/cli.py' | |||
185 | --- src/maascli/cli.py 2016-07-30 01:17:54 +0000 | |||
186 | +++ src/maascli/cli.py 2016-12-07 15:50:52 +0000 | |||
187 | @@ -189,8 +189,8 @@ | |||
188 | 189 | # Setup and the allowed django commands into the maascli. | 189 | # Setup and the allowed django commands into the maascli. |
189 | 190 | management = get_django_management() | 190 | management = get_django_management() |
190 | 191 | if management is not None and is_maasserver_available(): | 191 | if management is not None and is_maasserver_available(): |
193 | 192 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.settings") | 192 | os.environ.setdefault( |
194 | 193 | sys.path.append('/usr/share/maas') | 193 | "DJANGO_SETTINGS_MODULE", "maasserver.djangosettings.settings") |
195 | 194 | load_regiond_commands(management, parser) | 194 | load_regiond_commands(management, parser) |
196 | 195 | 195 | ||
197 | 196 | 196 | ||
198 | 197 | 197 | ||
199 | === modified file 'src/maasserver/__init__.py' | |||
200 | --- src/maasserver/__init__.py 2016-10-20 14:45:06 +0000 | |||
201 | +++ src/maasserver/__init__.py 2016-12-07 15:50:52 +0000 | |||
202 | @@ -8,7 +8,7 @@ | |||
203 | 8 | 'DefaultViewMeta', | 8 | 'DefaultViewMeta', |
204 | 9 | 'is_master_process', | 9 | 'is_master_process', |
205 | 10 | 'logger', | 10 | 'logger', |
207 | 11 | ] | 11 | ] |
208 | 12 | 12 | ||
209 | 13 | import logging | 13 | import logging |
210 | 14 | from os import environ | 14 | from os import environ |
211 | 15 | 15 | ||
212 | === added file 'src/maasserver/api/chassis.py' | |||
213 | --- src/maasserver/api/chassis.py 1970-01-01 00:00:00 +0000 | |||
214 | +++ src/maasserver/api/chassis.py 2016-12-07 15:50:52 +0000 | |||
215 | @@ -0,0 +1,78 @@ | |||
216 | 1 | # Copyright 2016 Canonical Ltd. This software is licensed under the | ||
217 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
218 | 3 | |||
219 | 4 | __all__ = [ | ||
220 | 5 | "ChassiHandler", | ||
221 | 6 | "ChassisHandler", | ||
222 | 7 | ] | ||
223 | 8 | |||
224 | 9 | from maasserver.api.nodes import ( | ||
225 | 10 | NodeHandler, | ||
226 | 11 | NodesHandler, | ||
227 | 12 | ) | ||
228 | 13 | from maasserver.enum import NODE_PERMISSION | ||
229 | 14 | from maasserver.models.node import Chassis | ||
230 | 15 | from piston3.utils import rc | ||
231 | 16 | |||
232 | 17 | # Chassis fields exposed on the API. | ||
233 | 18 | DISPLAYED_CHASSIS_FIELDS = ( | ||
234 | 19 | 'system_id', | ||
235 | 20 | 'hostname', | ||
236 | 21 | 'cpu_count', | ||
237 | 22 | 'memory', | ||
238 | 23 | 'chassis_type', | ||
239 | 24 | 'node_type', | ||
240 | 25 | 'node_type_name', | ||
241 | 26 | ) | ||
242 | 27 | |||
243 | 28 | |||
244 | 29 | class ChassiHandler(NodeHandler): | ||
245 | 30 | """Manage an individual chassis. | ||
246 | 31 | |||
247 | 32 | The chassis is identified by its system_id. | ||
248 | 33 | """ | ||
249 | 34 | api_doc_section_name = "Chassis" | ||
250 | 35 | |||
251 | 36 | create = update = None | ||
252 | 37 | model = Chassis | ||
253 | 38 | fields = DISPLAYED_CHASSIS_FIELDS | ||
254 | 39 | |||
255 | 40 | @classmethod | ||
256 | 41 | def chassis_type(cls, chassis): | ||
257 | 42 | return chassis.power_type | ||
258 | 43 | |||
259 | 44 | def delete(self, request, system_id): | ||
260 | 45 | """Delete a specific Chassis. | ||
261 | 46 | |||
262 | 47 | Returns 404 if the chassis is not found. | ||
263 | 48 | Returns 403 if the user does not have permission to delete the chassis. | ||
264 | 49 | Returns 204 if the chassis is successfully deleted. | ||
265 | 50 | """ | ||
266 | 51 | chassis = self.model.objects.get_node_or_404( | ||
267 | 52 | system_id=system_id, user=request.user, | ||
268 | 53 | perm=NODE_PERMISSION.ADMIN) | ||
269 | 54 | chassis.delete() | ||
270 | 55 | return rc.DELETED | ||
271 | 56 | |||
272 | 57 | @classmethod | ||
273 | 58 | def resource_uri(cls, chassis=None): | ||
274 | 59 | # This method is called by piston in two different contexts: | ||
275 | 60 | # - when generating an uri template to be used in the documentation | ||
276 | 61 | # (in this case, it is called with node=None). | ||
277 | 62 | # - when populating the 'resource_uri' field of an object | ||
278 | 63 | # returned by the API (in this case, node is a node object). | ||
279 | 64 | chassis_system_id = "system_id" | ||
280 | 65 | if chassis is not None: | ||
281 | 66 | chassis_system_id = chassis.system_id | ||
282 | 67 | return ('chassi_handler', (chassis_system_id,)) | ||
283 | 68 | |||
284 | 69 | |||
285 | 70 | class ChassisHandler(NodesHandler): | ||
286 | 71 | """Manage the collection of all the chassis in the MAAS.""" | ||
287 | 72 | api_doc_section_name = "Chassis" | ||
288 | 73 | create = update = delete = None | ||
289 | 74 | base_model = Chassis | ||
290 | 75 | |||
291 | 76 | @classmethod | ||
292 | 77 | def resource_uri(cls, *args, **kwargs): | ||
293 | 78 | return ('chassis_handler', []) | ||
294 | 0 | 79 | ||
295 | === modified file 'src/maasserver/api/doc.py' | |||
296 | --- src/maasserver/api/doc.py 2016-04-12 22:25:44 +0000 | |||
297 | +++ src/maasserver/api/doc.py 2016-12-07 15:50:52 +0000 | |||
298 | @@ -33,7 +33,7 @@ | |||
299 | 33 | from piston3.doc import generate_doc | 33 | from piston3.doc import generate_doc |
300 | 34 | from piston3.handler import BaseHandler | 34 | from piston3.handler import BaseHandler |
301 | 35 | from piston3.resource import Resource | 35 | from piston3.resource import Resource |
303 | 36 | from provisioningserver.power.schema import JSON_POWER_TYPE_PARAMETERS | 36 | from provisioningserver.drivers.power import PowerDriverRegistry |
304 | 37 | 37 | ||
305 | 38 | 38 | ||
306 | 39 | def accumulate_api_resources(resolver, accumulator): | 39 | def accumulate_api_resources(resolver, accumulator): |
307 | @@ -77,8 +77,7 @@ | |||
308 | 77 | def generate_power_types_doc(): | 77 | def generate_power_types_doc(): |
309 | 78 | """Generate ReST documentation for the supported power types. | 78 | """Generate ReST documentation for the supported power types. |
310 | 79 | 79 | ||
313 | 80 | The documentation is derived from the `JSON_POWER_TYPE_PARAMETERS` | 80 | The documentation is derived from the `PowerDriverRegistry`. |
312 | 81 | object. | ||
314 | 82 | """ | 81 | """ |
315 | 83 | output = StringIO() | 82 | output = StringIO() |
316 | 84 | line = partial(print, file=output) | 83 | line = partial(print, file=output) |
317 | @@ -92,14 +91,14 @@ | |||
318 | 92 | "list if the cluster in question is from an older version of " | 91 | "list if the cluster in question is from an older version of " |
319 | 93 | "MAAS.") | 92 | "MAAS.") |
320 | 94 | line() | 93 | line() |
323 | 95 | for item in JSON_POWER_TYPE_PARAMETERS: | 94 | for _, driver in PowerDriverRegistry: |
324 | 96 | title = "%s (%s)" % (item['name'], item['description']) | 95 | title = "%s (%s)" % (driver.name, driver.description) |
325 | 97 | line(title) | 96 | line(title) |
326 | 98 | line('=' * len(title)) | 97 | line('=' * len(title)) |
327 | 99 | line('') | 98 | line('') |
328 | 100 | line("Power parameters:") | 99 | line("Power parameters:") |
329 | 101 | line('') | 100 | line('') |
331 | 102 | for field in item['fields']: | 101 | for field in driver.settings: |
332 | 103 | field_description = [] | 102 | field_description = [] |
333 | 104 | field_description.append( | 103 | field_description.append( |
334 | 105 | "* %s (%s)." % (field['name'], field['label'])) | 104 | "* %s (%s)." % (field['name'], field['label'])) |
335 | 106 | 105 | ||
336 | === modified file 'src/maasserver/api/doc_handler.py' | |||
337 | --- src/maasserver/api/doc_handler.py 2016-08-18 17:31:05 +0000 | |||
338 | +++ src/maasserver/api/doc_handler.py 2016-12-07 15:50:52 +0000 | |||
339 | @@ -9,7 +9,7 @@ | |||
340 | 9 | 9 | ||
341 | 10 | 10 | ||
342 | 11 | API versions | 11 | API versions |
344 | 12 | ------------ | 12 | ```````````` |
345 | 13 | 13 | ||
346 | 14 | At any given time, MAAS may support multiple versions of its API. The version | 14 | At any given time, MAAS may support multiple versions of its API. The version |
347 | 15 | number is included in the API's URL, e.g. /api/2.0/ | 15 | number is included in the API's URL, e.g. /api/2.0/ |
348 | @@ -23,7 +23,7 @@ | |||
349 | 23 | 23 | ||
350 | 24 | 24 | ||
351 | 25 | HTTP methods and parameter-passing | 25 | HTTP methods and parameter-passing |
353 | 26 | ---------------------------------- | 26 | `````````````````````````````````` |
354 | 27 | 27 | ||
355 | 28 | The following HTTP methods are available for accessing the API: | 28 | The following HTTP methods are available for accessing the API: |
356 | 29 | * GET (for information retrieval and queries), | 29 | * GET (for information retrieval and queries), |
357 | @@ -82,6 +82,7 @@ | |||
358 | 82 | # etc. whatever render_api_docs() produces, so that you can concatenate | 82 | # etc. whatever render_api_docs() produces, so that you can concatenate |
359 | 83 | # the two. | 83 | # the two. |
360 | 84 | api_doc_title = dedent(""" | 84 | api_doc_title = dedent(""" |
361 | 85 | :tocdepth: 3 | ||
362 | 85 | .. _region-controller-api: | 86 | .. _region-controller-api: |
363 | 86 | 87 | ||
364 | 87 | ======== | 88 | ======== |
365 | @@ -109,7 +110,7 @@ | |||
366 | 109 | line() | 110 | line() |
367 | 110 | line() | 111 | line() |
368 | 111 | line('Operations') | 112 | line('Operations') |
370 | 112 | line('----------') | 113 | line('``````````') |
371 | 113 | line() | 114 | line() |
372 | 114 | 115 | ||
373 | 115 | def export_key(export): | 116 | def export_key(export): |
374 | @@ -132,25 +133,24 @@ | |||
375 | 132 | section_name = doc.handler.api_doc_section_name | 133 | section_name = doc.handler.api_doc_section_name |
376 | 133 | line(section_name) | 134 | line(section_name) |
377 | 134 | line('=' * len(section_name)) | 135 | line('=' * len(section_name)) |
379 | 135 | line(doc.handler.__doc__.strip()) | 136 | line(dedent(doc.handler.__doc__).strip()) |
380 | 136 | line() | 137 | line() |
381 | 137 | line() | 138 | line() |
382 | 138 | for (http_method, op), function in sorted(exports, key=export_key): | 139 | for (http_method, op), function in sorted(exports, key=export_key): |
386 | 139 | line("``%s %s``" % (http_method, uri_template), end="") | 140 | operation = " op=%s" % op if op is not None else "" |
387 | 140 | if op is not None: | 141 | subsection = "``%s %s%s``" % (http_method, uri_template, operation) |
388 | 141 | line(" ``op=%s``" % op, end="") | 142 | line("%s\n%s\n" % (subsection, '#' * len(subsection))) |
389 | 142 | line() | 143 | line() |
390 | 143 | docstring = getdoc(function) | 144 | docstring = getdoc(function) |
391 | 144 | if docstring is not None: | 145 | if docstring is not None: |
393 | 145 | for docline in docstring.splitlines(): | 146 | for docline in dedent(docstring).splitlines(): |
394 | 146 | if docline.strip() == '': | 147 | if docline.strip() == '': |
395 | 147 | # Blank line. Don't indent. | 148 | # Blank line. Don't indent. |
396 | 148 | line() | 149 | line() |
397 | 149 | else: | 150 | else: |
398 | 150 | # Print documentation line, indented. | 151 | # Print documentation line, indented. |
400 | 151 | line(" ", docline, sep="") | 152 | line(docline) |
401 | 152 | line() | 153 | line() |
402 | 153 | |||
403 | 154 | line() | 154 | line() |
404 | 155 | line() | 155 | line() |
405 | 156 | line(generate_power_types_doc()) | 156 | line(generate_power_types_doc()) |
406 | 157 | 157 | ||
407 | === modified file 'src/maasserver/api/interfaces.py' | |||
408 | --- src/maasserver/api/interfaces.py 2016-10-20 16:04:24 +0000 | |||
409 | +++ src/maasserver/api/interfaces.py 2016-12-07 15:50:52 +0000 | |||
410 | @@ -436,18 +436,18 @@ | |||
411 | 436 | 436 | ||
412 | 437 | Following are parameters specific to bonds: | 437 | Following are parameters specific to bonds: |
413 | 438 | 438 | ||
415 | 439 | :param bond-mode: The operating mode of the bond. | 439 | :param bond_mode: The operating mode of the bond. |
416 | 440 | (Default: active-backup). | 440 | (Default: active-backup). |
418 | 441 | :param bond-miimon: The link monitoring freqeuncy in milliseconds. | 441 | :param bond_miimon: The link monitoring freqeuncy in milliseconds. |
419 | 442 | (Default: 100). | 442 | (Default: 100). |
421 | 443 | :param bond-downdelay: Specifies the time, in milliseconds, to wait | 443 | :param bond_downdelay: Specifies the time, in milliseconds, to wait |
422 | 444 | before disabling a slave after a link failure has been detected. | 444 | before disabling a slave after a link failure has been detected. |
424 | 445 | :param bond-updelay: Specifies the time, in milliseconds, to wait | 445 | :param bond_updelay: Specifies the time, in milliseconds, to wait |
425 | 446 | before enabling a slave after a link recovery has been detected. | 446 | before enabling a slave after a link recovery has been detected. |
427 | 447 | :param bond-lacp_rate: Option specifying the rate in which we'll ask | 447 | :param bond_lacp_rate: Option specifying the rate in which we'll ask |
428 | 448 | our link partner to transmit LACPDU packets in 802.3ad mode. | 448 | our link partner to transmit LACPDU packets in 802.3ad mode. |
429 | 449 | Available options are fast or slow. (Default: slow). | 449 | Available options are fast or slow. (Default: slow). |
431 | 450 | :param bond-xmit_hash_policy: The transmit hash policy to use for | 450 | :param bond_xmit_hash_policy: The transmit hash policy to use for |
432 | 451 | slave selection in balance-xor, 802.3ad, and tlb modes. | 451 | slave selection in balance-xor, 802.3ad, and tlb modes. |
433 | 452 | 452 | ||
434 | 453 | Supported bonding modes (bond-mode): | 453 | Supported bonding modes (bond-mode): |
435 | 454 | 454 | ||
436 | === modified file 'src/maasserver/api/nodes.py' | |||
437 | --- src/maasserver/api/nodes.py 2016-06-17 07:16:39 +0000 | |||
438 | +++ src/maasserver/api/nodes.py 2016-12-07 15:50:52 +0000 | |||
439 | @@ -47,10 +47,9 @@ | |||
440 | 47 | Node, | 47 | Node, |
441 | 48 | OwnerData, | 48 | OwnerData, |
442 | 49 | ) | 49 | ) |
443 | 50 | from maasserver.models.node import typecast_to_node_type | ||
444 | 51 | from maasserver.models.nodeprobeddetails import get_single_probed_details | 50 | from maasserver.models.nodeprobeddetails import get_single_probed_details |
445 | 52 | from piston3.utils import rc | 51 | from piston3.utils import rc |
447 | 53 | from provisioningserver.power.schema import UNKNOWN_POWER_TYPE | 52 | from provisioningserver.drivers.power import UNKNOWN_POWER_TYPE |
448 | 54 | 53 | ||
449 | 55 | 54 | ||
450 | 56 | def store_node_power_parameters(node, request): | 55 | def store_node_power_parameters(node, request): |
451 | @@ -171,7 +170,7 @@ | |||
452 | 171 | else: | 170 | else: |
453 | 172 | # Return the specific node type object so we get the correct | 171 | # Return the specific node type object so we get the correct |
454 | 173 | # listing | 172 | # listing |
456 | 174 | return typecast_to_node_type(node) | 173 | return node.as_self() |
457 | 175 | 174 | ||
458 | 176 | def delete(self, request, system_id): | 175 | def delete(self, request, system_id): |
459 | 177 | """Delete a specific Node. | 176 | """Delete a specific Node. |
460 | @@ -183,7 +182,7 @@ | |||
461 | 183 | node = self.model.objects.get_node_or_404( | 182 | node = self.model.objects.get_node_or_404( |
462 | 184 | system_id=system_id, user=request.user, | 183 | system_id=system_id, user=request.user, |
463 | 185 | perm=NODE_PERMISSION.ADMIN) | 184 | perm=NODE_PERMISSION.ADMIN) |
465 | 186 | typecast_to_node_type(node).delete() | 185 | node.as_self().delete() |
466 | 187 | return rc.DELETED | 186 | return rc.DELETED |
467 | 188 | 187 | ||
468 | 189 | @classmethod | 188 | @classmethod |
469 | @@ -315,19 +314,23 @@ | |||
470 | 315 | 314 | ||
471 | 316 | if self.base_model == Node: | 315 | if self.base_model == Node: |
472 | 317 | # Avoid circular dependencies | 316 | # Avoid circular dependencies |
473 | 317 | from maasserver.api.chassis import ChassisHandler | ||
474 | 318 | from maasserver.api.devices import DevicesHandler | 318 | from maasserver.api.devices import DevicesHandler |
475 | 319 | from maasserver.api.machines import MachinesHandler | 319 | from maasserver.api.machines import MachinesHandler |
476 | 320 | from maasserver.api.rackcontrollers import RackControllersHandler | 320 | from maasserver.api.rackcontrollers import RackControllersHandler |
477 | 321 | from maasserver.api.regioncontrollers import ( | 321 | from maasserver.api.regioncontrollers import ( |
478 | 322 | RegionControllersHandler | 322 | RegionControllersHandler |
479 | 323 | ) | 323 | ) |
480 | 324 | from maasserver.api.storage import StoragesHandler | ||
481 | 324 | racks = RackControllersHandler().read(request).order_by("id") | 325 | racks = RackControllersHandler().read(request).order_by("id") |
482 | 325 | nodes = list(chain( | 326 | nodes = list(chain( |
483 | 327 | ChassisHandler().read(request).order_by("id"), | ||
484 | 326 | DevicesHandler().read(request).order_by("id"), | 328 | DevicesHandler().read(request).order_by("id"), |
485 | 327 | MachinesHandler().read(request).order_by("id"), | 329 | MachinesHandler().read(request).order_by("id"), |
486 | 328 | racks, | 330 | racks, |
487 | 329 | RegionControllersHandler().read(request).exclude( | 331 | RegionControllersHandler().read(request).exclude( |
488 | 330 | id__in=racks).order_by("id"), | 332 | id__in=racks).order_by("id"), |
489 | 333 | StoragesHandler().read(request).order_by("id"), | ||
490 | 331 | )) | 334 | )) |
491 | 332 | return nodes | 335 | return nodes |
492 | 333 | else: | 336 | else: |
493 | 334 | 337 | ||
494 | === modified file 'src/maasserver/api/results.py' | |||
495 | --- src/maasserver/api/results.py 2016-07-30 01:17:54 +0000 | |||
496 | +++ src/maasserver/api/results.py 2016-12-07 15:50:52 +0000 | |||
497 | @@ -14,7 +14,6 @@ | |||
498 | 14 | ) | 14 | ) |
499 | 15 | from maasserver.enum import NODE_PERMISSION | 15 | from maasserver.enum import NODE_PERMISSION |
500 | 16 | from maasserver.models import Node | 16 | from maasserver.models import Node |
501 | 17 | from maasserver.models.node import typecast_to_node_type | ||
502 | 18 | from metadataserver.models import NodeResult | 17 | from metadataserver.models import NodeResult |
503 | 19 | 18 | ||
504 | 20 | 19 | ||
505 | @@ -54,9 +53,9 @@ | |||
506 | 54 | if result_type is not None: | 53 | if result_type is not None: |
507 | 55 | results = results.filter(result_type__in=result_type) | 54 | results = results.filter(result_type__in=result_type) |
508 | 56 | # Convert the node objects into typed node objects so we get the | 55 | # Convert the node objects into typed node objects so we get the |
510 | 57 | # proper listing | 56 | # proper listing. |
511 | 58 | for result in results: | 57 | for result in results: |
513 | 59 | result.node = typecast_to_node_type(result.node) | 58 | result.node = result.node.as_self() |
514 | 60 | return results | 59 | return results |
515 | 61 | 60 | ||
516 | 62 | @classmethod | 61 | @classmethod |
517 | 63 | 62 | ||
518 | === added file 'src/maasserver/api/storage.py' | |||
519 | --- src/maasserver/api/storage.py 1970-01-01 00:00:00 +0000 | |||
520 | +++ src/maasserver/api/storage.py 2016-12-07 15:50:52 +0000 | |||
521 | @@ -0,0 +1,76 @@ | |||
522 | 1 | # Copyright 2016 Canonical Ltd. This software is licensed under the | ||
523 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
524 | 3 | |||
525 | 4 | __all__ = [ | ||
526 | 5 | "StorageHandler", | ||
527 | 6 | "StoragesHandler", | ||
528 | 7 | ] | ||
529 | 8 | |||
530 | 9 | from maasserver.api.nodes import ( | ||
531 | 10 | NodeHandler, | ||
532 | 11 | NodesHandler, | ||
533 | 12 | ) | ||
534 | 13 | from maasserver.enum import NODE_PERMISSION | ||
535 | 14 | from maasserver.models.node import Storage | ||
536 | 15 | from piston3.utils import rc | ||
537 | 16 | |||
538 | 17 | # Storage fields exposed on the API. | ||
539 | 18 | DISPLAYED_STORAGE_FIELDS = ( | ||
540 | 19 | 'system_id', | ||
541 | 20 | 'hostname', | ||
542 | 21 | 'storage_type', | ||
543 | 22 | 'node_type', | ||
544 | 23 | 'node_type_name', | ||
545 | 24 | ) | ||
546 | 25 | |||
547 | 26 | |||
548 | 27 | class StorageHandler(NodeHandler): | ||
549 | 28 | """Manage an individual storage system. | ||
550 | 29 | |||
551 | 30 | The storage is identified by its system_id. | ||
552 | 31 | """ | ||
553 | 32 | api_doc_section_name = "Storage" | ||
554 | 33 | |||
555 | 34 | create = update = None | ||
556 | 35 | model = Storage | ||
557 | 36 | fields = DISPLAYED_STORAGE_FIELDS | ||
558 | 37 | |||
559 | 38 | @classmethod | ||
560 | 39 | def storage_type(cls, storage): | ||
561 | 40 | return storage.power_type | ||
562 | 41 | |||
563 | 42 | def delete(self, request, system_id): | ||
564 | 43 | """Delete a specific Storage. | ||
565 | 44 | |||
566 | 45 | Returns 404 if the storage is not found. | ||
567 | 46 | Returns 403 if the user does not have permission to delete the storage. | ||
568 | 47 | Returns 204 if the storage is successfully deleted. | ||
569 | 48 | """ | ||
570 | 49 | storage = self.model.objects.get_node_or_404( | ||
571 | 50 | system_id=system_id, user=request.user, | ||
572 | 51 | perm=NODE_PERMISSION.ADMIN) | ||
573 | 52 | storage.delete() | ||
574 | 53 | return rc.DELETED | ||
575 | 54 | |||
576 | 55 | @classmethod | ||
577 | 56 | def resource_uri(cls, storage=None): | ||
578 | 57 | # This method is called by piston in two different contexts: | ||
579 | 58 | # - when generating an uri template to be used in the documentation | ||
580 | 59 | # (in this case, it is called with node=None). | ||
581 | 60 | # - when populating the 'resource_uri' field of an object | ||
582 | 61 | # returned by the API (in this case, node is a node object). | ||
583 | 62 | storage_system_id = "system_id" | ||
584 | 63 | if storage is not None: | ||
585 | 64 | storage_system_id = storage.system_id | ||
586 | 65 | return ('storage_handler', (storage_system_id,)) | ||
587 | 66 | |||
588 | 67 | |||
589 | 68 | class StoragesHandler(NodesHandler): | ||
590 | 69 | """Manage the collection of all the storage in the MAAS.""" | ||
591 | 70 | api_doc_section_name = "Storages" | ||
592 | 71 | create = update = delete = None | ||
593 | 72 | base_model = Storage | ||
594 | 73 | |||
595 | 74 | @classmethod | ||
596 | 75 | def resource_uri(cls, *args, **kwargs): | ||
597 | 76 | return ('storages_handler', []) | ||
598 | 0 | 77 | ||
599 | === modified file 'src/maasserver/api/subnets.py' | |||
600 | --- src/maasserver/api/subnets.py 2016-09-23 01:32:02 +0000 | |||
601 | +++ src/maasserver/api/subnets.py 2016-12-07 15:50:52 +0000 | |||
602 | @@ -29,6 +29,7 @@ | |||
603 | 29 | 'rdns_mode', | 29 | 'rdns_mode', |
604 | 30 | 'active_discovery', | 30 | 'active_discovery', |
605 | 31 | 'allow_proxy', | 31 | 'allow_proxy', |
606 | 32 | 'managed', | ||
607 | 32 | ) | 33 | ) |
608 | 33 | 34 | ||
609 | 34 | 35 | ||
610 | @@ -49,32 +50,76 @@ | |||
611 | 49 | 50 | ||
612 | 50 | @admin_method | 51 | @admin_method |
613 | 51 | def create(self, request): | 52 | def create(self, request): |
640 | 52 | """Create a subnet. | 53 | """\ |
641 | 53 | 54 | Create a subnet. | |
642 | 54 | :param name: Name of the subnet. | 55 | |
643 | 55 | :param description: Description of the subnet. | 56 | Required parameters |
644 | 56 | :param fabric: Fabric for the subnet. Defaults to the fabric the | 57 | ------------------- |
645 | 57 | provided VLAN belongs to or defaults to the default fabric. | 58 | |
646 | 58 | :param vlan: VLAN this subnet belongs to. Defaults to the default | 59 | cidr |
647 | 59 | VLAN for the provided fabric or defaults to the default VLAN in | 60 | The network CIDR for this subnet. |
648 | 60 | the default fabric. | 61 | |
649 | 61 | :param vid: VID of the VLAN this subnet belongs to. Only used when | 62 | |
650 | 62 | vlan is not provided. Picks the VLAN with this VID in the provided | 63 | Optional parameters |
651 | 63 | fabric or the default fabric if one is not given. | 64 | ------------------- |
652 | 64 | :param space: Space this subnet is in. Defaults to the default space. | 65 | |
653 | 65 | :param cidr: The network CIDR for this subnet. | 66 | name |
654 | 66 | :param gateway_ip: The gateway IP address for this subnet. | 67 | Name of the subnet. |
655 | 67 | :param rdns_mode: How reverse DNS is handled for this subnet. | 68 | |
656 | 68 | One of: 0 (Disabled), 1 (Enabled), or 2 (RFC2317). Disabled means | 69 | description |
657 | 69 | no reverse zone is created; Enabled means generate the reverse | 70 | Description of the subnet. |
658 | 70 | zone; RFC2317 extends Enabled to create the necessary parent zone | 71 | |
659 | 71 | with the appropriate CNAME resource records for the network, if the | 72 | vlan |
660 | 72 | network is small enough to require the support described in | 73 | VLAN this subnet belongs to. Defaults to the default VLAN for the |
661 | 73 | RFC2317. | 74 | provided fabric or defaults to the default VLAN in the default fabric |
662 | 74 | :param allow_proxy: Configure maas-proxy to allow requests from this | 75 | (if unspecified). |
663 | 75 | subnet. | 76 | |
664 | 76 | :param dns_servers: Comma-seperated list of DNS servers for this | 77 | fabric |
665 | 77 | subnet. | 78 | Fabric for the subnet. Defaults to the fabric the |
666 | 79 | provided VLAN belongs to, or defaults to the default fabric. | ||
667 | 80 | |||
668 | 81 | vid | ||
669 | 82 | VID of the VLAN this subnet belongs to. Only used when vlan is | ||
670 | 83 | not provided. Picks the VLAN with this VID in the provided | ||
671 | 84 | fabric or the default fabric if one is not given. | ||
672 | 85 | |||
673 | 86 | space | ||
674 | 87 | Space this subnet is in. Defaults to the default space. | ||
675 | 88 | |||
676 | 89 | gateway_ip | ||
677 | 90 | The gateway IP address for this subnet. | ||
678 | 91 | |||
679 | 92 | rdns_mode | ||
680 | 93 | How reverse DNS is handled for this subnet. | ||
681 | 94 | One of: 0 (Disabled), 1 (Enabled), or 2 (RFC2317). Disabled | ||
682 | 95 | means no reverse zone is created; Enabled means generate the | ||
683 | 96 | reverse zone; RFC2317 extends Enabled to create the necessary | ||
684 | 97 | parent zone with the appropriate CNAME resource records for the | ||
685 | 98 | network, if the network is small enough to require the support | ||
686 | 99 | described in RFC2317. | ||
687 | 100 | |||
688 | 101 | allow_proxy | ||
689 | 102 | Configure maas-proxy to allow requests from this | ||
690 | 103 | subnet. | ||
691 | 104 | |||
692 | 105 | dns_servers | ||
693 | 106 | Comma-seperated list of DNS servers for this subnet. | ||
694 | 107 | |||
695 | 108 | managed | ||
696 | 109 | In MAAS 2.0+, all subnets are assumed to be managed by default. | ||
697 | 110 | |||
698 | 111 | Only managed subnets allow DHCP to be enabled on their related | ||
699 | 112 | dynamic ranges. (Thus, dynamic ranges become "informational | ||
700 | 113 | only"; an indication that another DHCP server is currently | ||
701 | 114 | handling them, or that MAAS will handle them when the subnet is | ||
702 | 115 | enabled for management.) | ||
703 | 116 | |||
704 | 117 | Managed subnets do not allow IP allocation by default. The | ||
705 | 118 | meaning of a "reserved" IP range is reversed for an unmanaged | ||
706 | 119 | subnet. (That is, for managed subnets, "reserved" means "MAAS | ||
707 | 120 | cannot allocate any IP address within this reserved block". For | ||
708 | 121 | unmanaged subnets, "reserved" means "MAAS must allocate IP | ||
709 | 122 | addresses only from reserved IP ranges". | ||
710 | 78 | """ | 123 | """ |
711 | 79 | form = SubnetForm(data=request.data) | 124 | form = SubnetForm(data=request.data) |
712 | 80 | if form.is_valid(): | 125 | if form.is_valid(): |
713 | @@ -100,7 +145,8 @@ | |||
714 | 100 | 145 | ||
715 | 101 | @classmethod | 146 | @classmethod |
716 | 102 | def space(cls, subnet): | 147 | def space(cls, subnet): |
718 | 103 | """Return the name of the space. | 148 | """\ |
719 | 149 | Return the name of the space. | ||
720 | 104 | 150 | ||
721 | 105 | Only the name is returned because the space endpoint will return | 151 | Only the name is returned because the space endpoint will return |
722 | 106 | a list of all subnets in that space. If this returned the subnet | 152 | a list of all subnets in that space. If this returned the subnet |
723 | @@ -109,7 +155,8 @@ | |||
724 | 109 | return subnet.space.get_name() | 155 | return subnet.space.get_name() |
725 | 110 | 156 | ||
726 | 111 | def read(self, request, subnet_id): | 157 | def read(self, request, subnet_id): |
728 | 112 | """Read subnet. | 158 | """\ |
729 | 159 | Read subnet. | ||
730 | 113 | 160 | ||
731 | 114 | Returns 404 if the subnet is not found. | 161 | Returns 404 if the subnet is not found. |
732 | 115 | """ | 162 | """ |
733 | @@ -117,19 +164,44 @@ | |||
734 | 117 | subnet_id, request.user, NODE_PERMISSION.VIEW) | 164 | subnet_id, request.user, NODE_PERMISSION.VIEW) |
735 | 118 | 165 | ||
736 | 119 | def update(self, request, subnet_id): | 166 | def update(self, request, subnet_id): |
750 | 120 | """Update subnet. | 167 | """\ |
751 | 121 | 168 | Update the specified subnet. | |
752 | 122 | :param name: Name of the subnet. | 169 | |
753 | 123 | :param description: Description of the subnet. | 170 | Please see the documentation for the 'create' operation for detailed |
754 | 124 | :param vlan: VLAN this subnet belongs to. | 171 | descriptions of each parameter. |
755 | 125 | :param space: Space this subnet is in. | 172 | |
756 | 126 | :param cidr: The network CIDR for this subnet. | 173 | Optional parameters |
757 | 127 | :param gateway_ip: The gateway IP address for this subnet. | 174 | ------------------- |
758 | 128 | :param rdns_mode: How reverse DNS is handled for this subnet. | 175 | |
759 | 129 | :param allow_proxy: Configure maas-proxy to allow requests from this \ | 176 | name |
760 | 130 | subnet. | 177 | Name of the subnet. |
761 | 131 | :param dns_servers: Comma-seperated list of DNS servers for this \ | 178 | |
762 | 132 | subnet. | 179 | description |
763 | 180 | Description of the subnet. | ||
764 | 181 | |||
765 | 182 | vlan | ||
766 | 183 | VLAN this subnet belongs to. | ||
767 | 184 | |||
768 | 185 | space | ||
769 | 186 | Space this subnet is in. | ||
770 | 187 | |||
771 | 188 | cidr | ||
772 | 189 | The network CIDR for this subnet. | ||
773 | 190 | |||
774 | 191 | gateway_ip | ||
775 | 192 | The gateway IP address for this subnet. | ||
776 | 193 | |||
777 | 194 | rdns_mode | ||
778 | 195 | How reverse DNS is handled for this subnet. | ||
779 | 196 | |||
780 | 197 | allow_proxy | ||
781 | 198 | Configure maas-proxy to allow requests from this subnet. | ||
782 | 199 | |||
783 | 200 | dns_servers | ||
784 | 201 | Comma-seperated list of DNS servers for this subnet. | ||
785 | 202 | |||
786 | 203 | managed | ||
787 | 204 | If False, MAAS should not manage this subnet. (Default: True) | ||
788 | 133 | 205 | ||
789 | 134 | Returns 404 if the subnet is not found. | 206 | Returns 404 if the subnet is not found. |
790 | 135 | """ | 207 | """ |
791 | @@ -142,7 +214,8 @@ | |||
792 | 142 | raise MAASAPIValidationError(form.errors) | 214 | raise MAASAPIValidationError(form.errors) |
793 | 143 | 215 | ||
794 | 144 | def delete(self, request, subnet_id): | 216 | def delete(self, request, subnet_id): |
796 | 145 | """Delete subnet. | 217 | """\ |
797 | 218 | Delete subnet. | ||
798 | 146 | 219 | ||
799 | 147 | Returns 404 if the subnet is not found. | 220 | Returns 404 if the subnet is not found. |
800 | 148 | """ | 221 | """ |
801 | @@ -153,7 +226,8 @@ | |||
802 | 153 | 226 | ||
803 | 154 | @operation(idempotent=True) | 227 | @operation(idempotent=True) |
804 | 155 | def reserved_ip_ranges(self, request, subnet_id): | 228 | def reserved_ip_ranges(self, request, subnet_id): |
806 | 156 | """Lists IP ranges currently reserved in the subnet. | 229 | """\ |
807 | 230 | Lists IP ranges currently reserved in the subnet. | ||
808 | 157 | 231 | ||
809 | 158 | Returns 404 if the subnet is not found. | 232 | Returns 404 if the subnet is not found. |
810 | 159 | """ | 233 | """ |
811 | @@ -163,7 +237,8 @@ | |||
812 | 163 | 237 | ||
813 | 164 | @operation(idempotent=True) | 238 | @operation(idempotent=True) |
814 | 165 | def unreserved_ip_ranges(self, request, subnet_id): | 239 | def unreserved_ip_ranges(self, request, subnet_id): |
816 | 166 | """Lists IP ranges currently unreserved in the subnet. | 240 | """\ |
817 | 241 | Lists IP ranges currently unreserved in the subnet. | ||
818 | 167 | 242 | ||
819 | 168 | Returns 404 if the subnet is not found. | 243 | Returns 404 if the subnet is not found. |
820 | 169 | """ | 244 | """ |
821 | @@ -174,22 +249,27 @@ | |||
822 | 174 | 249 | ||
823 | 175 | @operation(idempotent=True) | 250 | @operation(idempotent=True) |
824 | 176 | def statistics(self, request, subnet_id): | 251 | def statistics(self, request, subnet_id): |
826 | 177 | """ | 252 | """\ |
827 | 178 | Returns statistics for the specified subnet, including: | 253 | Returns statistics for the specified subnet, including: |
828 | 179 | 254 | ||
842 | 180 | num_available - the number of available IP addresses | 255 | num_available: the number of available IP addresses |
843 | 181 | largest_available - the largest number of contiguous free IP addresses | 256 | largest_available: the largest number of contiguous free IP addresses |
844 | 182 | num_unavailable - the number of unavailable IP addresses | 257 | num_unavailable: the number of unavailable IP addresses |
845 | 183 | total_addresses - the sum of the available plus unavailable addresses | 258 | total_addresses: the sum of the available plus unavailable addresses |
846 | 184 | usage - the (floating point) usage percentage of this subnet | 259 | usage: the (floating point) usage percentage of this subnet |
847 | 185 | usage_string - the (formatted unicode) usage percentage of this subnet | 260 | usage_string: the (formatted unicode) usage percentage of this subnet |
848 | 186 | ranges - the specific IP ranges present in ths subnet (if specified) | 261 | ranges: the specific IP ranges present in ths subnet (if specified) |
849 | 187 | 262 | ||
850 | 188 | Optional arguments: | 263 | Optional parameters |
851 | 189 | include_ranges: if True, includes detailed information | 264 | ------------------- |
852 | 190 | about the usage of this range. | 265 | |
853 | 191 | include_suggestions: if True, includes the suggested gateway and | 266 | include_ranges |
854 | 192 | dynamic range for this subnet, if it were to be configured. | 267 | If True, includes detailed information |
855 | 268 | about the usage of this range. | ||
856 | 269 | |||
857 | 270 | include_suggestions | ||
858 | 271 | If True, includes the suggested gateway and dynamic range for this | ||
859 | 272 | subnet, if it were to be configured. | ||
860 | 193 | 273 | ||
861 | 194 | Returns 404 if the subnet is not found. | 274 | Returns 404 if the subnet is not found. |
862 | 195 | """ | 275 | """ |
863 | @@ -208,14 +288,19 @@ | |||
864 | 208 | 288 | ||
865 | 209 | @operation(idempotent=True) | 289 | @operation(idempotent=True) |
866 | 210 | def ip_addresses(self, request, subnet_id): | 290 | def ip_addresses(self, request, subnet_id): |
868 | 211 | """ | 291 | """\ |
869 | 212 | Returns a summary of IP addresses assigned to this subnet. | 292 | Returns a summary of IP addresses assigned to this subnet. |
870 | 213 | 293 | ||
876 | 214 | Optional arguments: | 294 | Optional parameters |
877 | 215 | with_username: (default=True) if False, suppresses the display | 295 | ------------------- |
878 | 216 | of usernames associated with each address. | 296 | |
879 | 217 | with_node_summary: (default=True) if False, suppresses the display | 297 | with_username |
880 | 218 | of any node associated with each address. | 298 | If False, suppresses the display of usernames associated with each |
881 | 299 | address. (Default: True) | ||
882 | 300 | |||
883 | 301 | with_node_summary | ||
884 | 302 | If False, suppresses the display of any node associated with each | ||
885 | 303 | address. (Default: True) | ||
886 | 219 | """ | 304 | """ |
887 | 220 | subnet = Subnet.objects.get_subnet_or_404( | 305 | subnet = Subnet.objects.get_subnet_or_404( |
888 | 221 | subnet_id, request.user, NODE_PERMISSION.VIEW) | 306 | subnet_id, request.user, NODE_PERMISSION.VIEW) |
889 | 222 | 307 | ||
890 | === modified file 'src/maasserver/api/tags.py' | |||
891 | --- src/maasserver/api/tags.py 2016-04-27 00:55:47 +0000 | |||
892 | +++ src/maasserver/api/tags.py 2016-12-07 15:50:52 +0000 | |||
893 | @@ -37,7 +37,6 @@ | |||
894 | 37 | RegionController, | 37 | RegionController, |
895 | 38 | Tag, | 38 | Tag, |
896 | 39 | ) | 39 | ) |
897 | 40 | from maasserver.models.node import typecast_to_node_type | ||
898 | 41 | from maasserver.models.user import get_auth_tokens | 40 | from maasserver.models.user import get_auth_tokens |
899 | 42 | from maasserver.utils.orm import get_one | 41 | from maasserver.utils.orm import get_one |
900 | 43 | from piston3.utils import rc | 42 | from piston3.utils import rc |
901 | @@ -137,7 +136,7 @@ | |||
902 | 137 | self.fields = None | 136 | self.fields = None |
903 | 138 | tag = Tag.objects.get_tag_or_404(name=name, user=request.user) | 137 | tag = Tag.objects.get_tag_or_404(name=name, user=request.user) |
904 | 139 | return [ | 138 | return [ |
906 | 140 | typecast_to_node_type(node) | 139 | node.as_self() |
907 | 141 | for node in model.objects.get_nodes( | 140 | for node in model.objects.get_nodes( |
908 | 142 | request.user, NODE_PERMISSION.VIEW, | 141 | request.user, NODE_PERMISSION.VIEW, |
909 | 143 | from_nodes=tag.node_set.all()) | 142 | from_nodes=tag.node_set.all()) |
910 | 144 | 143 | ||
911 | === added file 'src/maasserver/api/tests/test_chassis.py' | |||
912 | --- src/maasserver/api/tests/test_chassis.py 1970-01-01 00:00:00 +0000 | |||
913 | +++ src/maasserver/api/tests/test_chassis.py 2016-12-07 15:50:52 +0000 | |||
914 | @@ -0,0 +1,127 @@ | |||
915 | 1 | # Copyright 2016 Canonical Ltd. This software is licensed under the | ||
916 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
917 | 3 | |||
918 | 4 | """Tests for chassis API.""" | ||
919 | 5 | |||
920 | 6 | __all__ = [] | ||
921 | 7 | |||
922 | 8 | import http.client | ||
923 | 9 | |||
924 | 10 | from django.core.urlresolvers import reverse | ||
925 | 11 | from maasserver.enum import ( | ||
926 | 12 | NODE_STATUS, | ||
927 | 13 | NODE_TYPE, | ||
928 | 14 | ) | ||
929 | 15 | from maasserver.testing.api import APITestCase | ||
930 | 16 | from maasserver.testing.factory import factory | ||
931 | 17 | from maasserver.utils.converters import json_load_bytes | ||
932 | 18 | from maasserver.utils.orm import reload_object | ||
933 | 19 | |||
934 | 20 | |||
935 | 21 | class TestChassisAPI(APITestCase.ForUser): | ||
936 | 22 | |||
937 | 23 | def test_handler_path(self): | ||
938 | 24 | self.assertEqual( | ||
939 | 25 | '/api/2.0/chassis/', reverse('chassis_handler')) | ||
940 | 26 | |||
941 | 27 | def create_chassis(self, owner, nb=3): | ||
942 | 28 | return [ | ||
943 | 29 | factory.make_Node( | ||
944 | 30 | interface=True, node_type=NODE_TYPE.CHASSIS, owner=owner) | ||
945 | 31 | for _ in range(nb) | ||
946 | 32 | ] | ||
947 | 33 | |||
948 | 34 | def test_read_lists_chassis(self): | ||
949 | 35 | # The api allows for fetching the list of chassis. | ||
950 | 36 | chassis = self.create_chassis(owner=self.user) | ||
951 | 37 | factory.make_Node( | ||
952 | 38 | status=NODE_STATUS.ALLOCATED, owner=self.user) | ||
953 | 39 | response = self.client.get(reverse('chassis_handler')) | ||
954 | 40 | parsed_result = json_load_bytes(response.content) | ||
955 | 41 | |||
956 | 42 | self.assertEqual(http.client.OK, response.status_code) | ||
957 | 43 | self.assertItemsEqual( | ||
958 | 44 | [chassi.system_id for chassi in chassis], | ||
959 | 45 | [chassi.get('system_id') for chassi in parsed_result]) | ||
960 | 46 | |||
961 | 47 | def test_read_ignores_nodes(self): | ||
962 | 48 | factory.make_Node( | ||
963 | 49 | status=NODE_STATUS.ALLOCATED, owner=self.user) | ||
964 | 50 | response = self.client.get(reverse('chassis_handler')) | ||
965 | 51 | parsed_result = json_load_bytes(response.content) | ||
966 | 52 | |||
967 | 53 | self.assertEqual(http.client.OK, response.status_code) | ||
968 | 54 | self.assertEqual( | ||
969 | 55 | [], | ||
970 | 56 | [chassi.get('system_id') for chassi in parsed_result]) | ||
971 | 57 | |||
972 | 58 | def test_read_with_id_returns_matching_chassis(self): | ||
973 | 59 | # The "list" operation takes optional "id" parameters. Only | ||
974 | 60 | # chassis with matching ids will be returned. | ||
975 | 61 | chassis = self.create_chassis(owner=self.user) | ||
976 | 62 | ids = [chassi.system_id for chassi in chassis] | ||
977 | 63 | matching_id = ids[0] | ||
978 | 64 | response = self.client.get(reverse('chassis_handler'), { | ||
979 | 65 | 'id': [matching_id], | ||
980 | 66 | }) | ||
981 | 67 | parsed_result = json_load_bytes(response.content) | ||
982 | 68 | self.assertItemsEqual( | ||
983 | 69 | [matching_id], | ||
984 | 70 | [chassi.get('system_id') for chassi in parsed_result]) | ||
985 | 71 | |||
986 | 72 | def test_read_returns_limited_fields(self): | ||
987 | 73 | self.create_chassis(owner=self.user) | ||
988 | 74 | response = self.client.get(reverse('chassis_handler')) | ||
989 | 75 | parsed_result = json_load_bytes(response.content) | ||
990 | 76 | self.assertItemsEqual( | ||
991 | 77 | [ | ||
992 | 78 | 'hostname', | ||
993 | 79 | 'system_id', | ||
994 | 80 | 'cpu_count', | ||
995 | 81 | 'memory', | ||
996 | 82 | 'chassis_type', | ||
997 | 83 | 'node_type', | ||
998 | 84 | 'node_type_name', | ||
999 | 85 | 'resource_uri', | ||
1000 | 86 | ], | ||
1001 | 87 | list(parsed_result[0])) | ||
1002 | 88 | |||
1003 | 89 | |||
1004 | 90 | def get_chassi_uri(chassis): | ||
1005 | 91 | """Return a chassis URI on the API.""" | ||
1006 | 92 | return reverse('chassi_handler', args=[chassis.system_id]) | ||
1007 | 93 | |||
1008 | 94 | |||
1009 | 95 | class TestChassiAPI(APITestCase.ForUser): | ||
1010 | 96 | |||
1011 | 97 | def test_handler_path(self): | ||
1012 | 98 | system_id = factory.make_name('system-id') | ||
1013 | 99 | self.assertEqual( | ||
1014 | 100 | '/api/2.0/chassis/%s/' % system_id, | ||
1015 | 101 | reverse('chassi_handler', args=[system_id])) | ||
1016 | 102 | |||
1017 | 103 | def test_GET_reads_chassis(self): | ||
1018 | 104 | chassis = factory.make_Node( | ||
1019 | 105 | node_type=NODE_TYPE.CHASSIS, owner=self.user) | ||
1020 | 106 | |||
1021 | 107 | response = self.client.get(get_chassi_uri(chassis)) | ||
1022 | 108 | self.assertEqual( | ||
1023 | 109 | http.client.OK, response.status_code, response.content) | ||
1024 | 110 | parsed_chassis = json_load_bytes(response.content) | ||
1025 | 111 | self.assertEqual(chassis.system_id, parsed_chassis["system_id"]) | ||
1026 | 112 | |||
1027 | 113 | def test_DELETE_removes_chassis(self): | ||
1028 | 114 | self.become_admin() | ||
1029 | 115 | chassis = factory.make_Node( | ||
1030 | 116 | node_type=NODE_TYPE.CHASSIS, owner=self.user) | ||
1031 | 117 | response = self.client.delete(get_chassi_uri(chassis)) | ||
1032 | 118 | self.assertEqual( | ||
1033 | 119 | http.client.NO_CONTENT, response.status_code, response.content) | ||
1034 | 120 | self.assertIsNone(reload_object(chassis)) | ||
1035 | 121 | |||
1036 | 122 | def test_DELETE_rejects_deletion_if_not_permitted(self): | ||
1037 | 123 | chassis = factory.make_Node( | ||
1038 | 124 | node_type=NODE_TYPE.CHASSIS, owner=factory.make_User()) | ||
1039 | 125 | response = self.client.delete(get_chassi_uri(chassis)) | ||
1040 | 126 | self.assertEqual(http.client.FORBIDDEN, response.status_code) | ||
1041 | 127 | self.assertEqual(chassis, reload_object(chassis)) | ||
1042 | 0 | 128 | ||
1043 | === modified file 'src/maasserver/api/tests/test_doc.py' | |||
1044 | --- src/maasserver/api/tests/test_doc.py 2016-08-31 13:52:59 +0000 | |||
1045 | +++ src/maasserver/api/tests/test_doc.py 2016-12-07 15:50:52 +0000 | |||
1046 | @@ -8,6 +8,7 @@ | |||
1047 | 8 | import http.client | 8 | import http.client |
1048 | 9 | from inspect import getdoc | 9 | from inspect import getdoc |
1049 | 10 | from io import StringIO | 10 | from io import StringIO |
1050 | 11 | import random | ||
1051 | 11 | import sys | 12 | import sys |
1052 | 12 | import types | 13 | import types |
1053 | 13 | from unittest.mock import sentinel | 14 | from unittest.mock import sentinel |
1054 | @@ -49,7 +50,7 @@ | |||
1055 | 49 | from piston3.doc import HandlerDocumentation | 50 | from piston3.doc import HandlerDocumentation |
1056 | 50 | from piston3.handler import BaseHandler | 51 | from piston3.handler import BaseHandler |
1057 | 51 | from piston3.resource import Resource | 52 | from piston3.resource import Resource |
1059 | 52 | from provisioningserver.power.schema import make_json_field | 53 | from provisioningserver.drivers.power import PowerDriverRegistry |
1060 | 53 | from testtools.matchers import ( | 54 | from testtools.matchers import ( |
1061 | 54 | AfterPreprocessing, | 55 | AfterPreprocessing, |
1062 | 55 | AllMatch, | 56 | AllMatch, |
1063 | @@ -416,22 +417,19 @@ | |||
1064 | 416 | self.assertThat(doc, ContainsAll(["Power types", "IPMI"])) | 417 | self.assertThat(doc, ContainsAll(["Power types", "IPMI"])) |
1065 | 417 | 418 | ||
1066 | 418 | def test__generate_power_types_doc_generates_describes_power_type(self): | 419 | def test__generate_power_types_doc_generates_describes_power_type(self): |
1079 | 419 | name = factory.make_name('name') | 420 | power_driver = random.choice([ |
1080 | 420 | description = factory.make_name('description') | 421 | driver |
1081 | 421 | param_name = factory.make_name('param_name') | 422 | for _, driver in PowerDriverRegistry |
1082 | 422 | param_description = factory.make_name('param_description') | 423 | if len(driver.settings) > 0 |
1083 | 423 | json_fields = [{ | 424 | ]) |
1072 | 424 | 'name': name, | ||
1073 | 425 | 'description': description, | ||
1074 | 426 | 'fields': [ | ||
1075 | 427 | make_json_field(param_name, param_description), | ||
1076 | 428 | ], | ||
1077 | 429 | }] | ||
1078 | 430 | self.patch(doc_module, "JSON_POWER_TYPE_PARAMETERS", json_fields) | ||
1084 | 431 | doc = generate_power_types_doc() | 425 | doc = generate_power_types_doc() |
1085 | 432 | self.assertThat( | 426 | self.assertThat( |
1086 | 433 | doc, | 427 | doc, |
1088 | 434 | ContainsAll([name, description, param_name, param_description])) | 428 | ContainsAll([ |
1089 | 429 | power_driver.name, | ||
1090 | 430 | power_driver.description, | ||
1091 | 431 | power_driver.settings[0]['name'], | ||
1092 | 432 | power_driver.settings[0]['label']])) | ||
1093 | 435 | 433 | ||
1094 | 436 | 434 | ||
1095 | 437 | class TestDescribeCanonical(MAASTestCase): | 435 | class TestDescribeCanonical(MAASTestCase): |
1096 | 438 | 436 | ||
1097 | === modified file 'src/maasserver/api/tests/test_nodes.py' | |||
1098 | --- src/maasserver/api/tests/test_nodes.py 2016-10-28 08:43:09 +0000 | |||
1099 | +++ src/maasserver/api/tests/test_nodes.py 2016-12-07 15:50:52 +0000 | |||
1100 | @@ -304,7 +304,6 @@ | |||
1101 | 304 | response = self.client.get(reverse('nodes_handler')) | 304 | response = self.client.get(reverse('nodes_handler')) |
1102 | 305 | parsed_result = json.loads( | 305 | parsed_result = json.loads( |
1103 | 306 | response.content.decode(settings.DEFAULT_CHARSET)) | 306 | response.content.decode(settings.DEFAULT_CHARSET)) |
1104 | 307 | |||
1105 | 308 | self.assertEqual(http.client.OK, response.status_code) | 307 | self.assertEqual(http.client.OK, response.status_code) |
1106 | 309 | self.assertItemsEqual(system_ids, extract_system_ids(parsed_result)) | 308 | self.assertItemsEqual(system_ids, extract_system_ids(parsed_result)) |
1107 | 310 | 309 | ||
1108 | 311 | 310 | ||
1109 | === added file 'src/maasserver/api/tests/test_storage.py' | |||
1110 | --- src/maasserver/api/tests/test_storage.py 1970-01-01 00:00:00 +0000 | |||
1111 | +++ src/maasserver/api/tests/test_storage.py 2016-12-07 15:50:52 +0000 | |||
1112 | @@ -0,0 +1,125 @@ | |||
1113 | 1 | # Copyright 2016 Canonical Ltd. This software is licensed under the | ||
1114 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
1115 | 3 | |||
1116 | 4 | """Tests for storage API.""" | ||
1117 | 5 | |||
1118 | 6 | __all__ = [] | ||
1119 | 7 | |||
1120 | 8 | import http.client | ||
1121 | 9 | |||
1122 | 10 | from django.core.urlresolvers import reverse | ||
1123 | 11 | from maasserver.enum import ( | ||
1124 | 12 | NODE_STATUS, | ||
1125 | 13 | NODE_TYPE, | ||
1126 | 14 | ) | ||
1127 | 15 | from maasserver.testing.api import APITestCase | ||
1128 | 16 | from maasserver.testing.factory import factory | ||
1129 | 17 | from maasserver.utils.converters import json_load_bytes | ||
1130 | 18 | from maasserver.utils.orm import reload_object | ||
1131 | 19 | |||
1132 | 20 | |||
1133 | 21 | class TestStoragesAPI(APITestCase.ForUser): | ||
1134 | 22 | |||
1135 | 23 | def test_handler_path(self): | ||
1136 | 24 | self.assertEqual( | ||
1137 | 25 | '/api/2.0/storages/', reverse('storages_handler')) | ||
1138 | 26 | |||
1139 | 27 | def create_storages(self, owner, nb=3): | ||
1140 | 28 | return [ | ||
1141 | 29 | factory.make_Node( | ||
1142 | 30 | interface=True, node_type=NODE_TYPE.STORAGE, owner=owner) | ||
1143 | 31 | for _ in range(nb) | ||
1144 | 32 | ] | ||
1145 | 33 | |||
1146 | 34 | def test_read_lists_storage(self): | ||
1147 | 35 | # The api allows for fetching the list of storages. | ||
1148 | 36 | storages = self.create_storages(owner=self.user) | ||
1149 | 37 | factory.make_Node( | ||
1150 | 38 | status=NODE_STATUS.ALLOCATED, owner=self.user) | ||
1151 | 39 | response = self.client.get(reverse('storages_handler')) | ||
1152 | 40 | parsed_result = json_load_bytes(response.content) | ||
1153 | 41 | |||
1154 | 42 | self.assertEqual(http.client.OK, response.status_code) | ||
1155 | 43 | self.assertItemsEqual( | ||
1156 | 44 | [storage.system_id for storage in storages], | ||
1157 | 45 | [storage.get('system_id') for storage in parsed_result]) | ||
1158 | 46 | |||
1159 | 47 | def test_read_ignores_nodes(self): | ||
1160 | 48 | factory.make_Node( | ||
1161 | 49 | status=NODE_STATUS.ALLOCATED, owner=self.user) | ||
1162 | 50 | response = self.client.get(reverse('storages_handler')) | ||
1163 | 51 | parsed_result = json_load_bytes(response.content) | ||
1164 | 52 | |||
1165 | 53 | self.assertEqual(http.client.OK, response.status_code) | ||
1166 | 54 | self.assertEqual( | ||
1167 | 55 | [], | ||
1168 | 56 | [storage.get('system_id') for storage in parsed_result]) | ||
1169 | 57 | |||
1170 | 58 | def test_read_with_id_returns_matching_storage(self): | ||
1171 | 59 | # The "list" operation takes optional "id" parameters. Only | ||
1172 | 60 | # storages with matching ids will be returned. | ||
1173 | 61 | storages = self.create_storages(owner=self.user) | ||
1174 | 62 | ids = [storage.system_id for storage in storages] | ||
1175 | 63 | matching_id = ids[0] | ||
1176 | 64 | response = self.client.get(reverse('storages_handler'), { | ||
1177 | 65 | 'id': [matching_id], | ||
1178 | 66 | }) | ||
1179 | 67 | parsed_result = json_load_bytes(response.content) | ||
1180 | 68 | self.assertItemsEqual( | ||
1181 | 69 | [matching_id], | ||
1182 | 70 | [storage.get('system_id') for storage in parsed_result]) | ||
1183 | 71 | |||
1184 | 72 | def test_read_returns_limited_fields(self): | ||
1185 | 73 | self.create_storages(owner=self.user) | ||
1186 | 74 | response = self.client.get(reverse('storages_handler')) | ||
1187 | 75 | parsed_result = json_load_bytes(response.content) | ||
1188 | 76 | self.assertItemsEqual( | ||
1189 | 77 | [ | ||
1190 | 78 | 'hostname', | ||
1191 | 79 | 'system_id', | ||
1192 | 80 | 'storage_type', | ||
1193 | 81 | 'node_type', | ||
1194 | 82 | 'node_type_name', | ||
1195 | 83 | 'resource_uri', | ||
1196 | 84 | ], | ||
1197 | 85 | list(parsed_result[0])) | ||
1198 | 86 | |||
1199 | 87 | |||
1200 | 88 | def get_storage_uri(storage): | ||
1201 | 89 | """Return a storage's URI on the API.""" | ||
1202 | 90 | return reverse('storage_handler', args=[storage.system_id]) | ||
1203 | 91 | |||
1204 | 92 | |||
1205 | 93 | class TestStorageAPI(APITestCase.ForUser): | ||
1206 | 94 | |||
1207 | 95 | def test_handler_path(self): | ||
1208 | 96 | system_id = factory.make_name('system-id') | ||
1209 | 97 | self.assertEqual( | ||
1210 | 98 | '/api/2.0/storages/%s/' % system_id, | ||
1211 | 99 | reverse('storage_handler', args=[system_id])) | ||
1212 | 100 | |||
1213 | 101 | def test_GET_reads_storage(self): | ||
1214 | 102 | storage = factory.make_Node( | ||
1215 | 103 | node_type=NODE_TYPE.STORAGE, owner=self.user) | ||
1216 | 104 | |||
1217 | 105 | response = self.client.get(get_storage_uri(storage)) | ||
1218 | 106 | self.assertEqual( | ||
1219 | 107 | http.client.OK, response.status_code, response.content) | ||
1220 | 108 | parsed_storage = json_load_bytes(response.content) | ||
1221 | 109 | self.assertEqual(storage.system_id, parsed_storage["system_id"]) | ||
1222 | 110 | |||
1223 | 111 | def test_DELETE_removes_storage(self): | ||
1224 | 112 | self.become_admin() | ||
1225 | 113 | storage = factory.make_Node( | ||
1226 | 114 | node_type=NODE_TYPE.STORAGE, owner=self.user) | ||
1227 | 115 | response = self.client.delete(get_storage_uri(storage)) | ||
1228 | 116 | self.assertEqual( | ||
1229 | 117 | http.client.NO_CONTENT, response.status_code, response.content) | ||
1230 | 118 | self.assertIsNone(reload_object(storage)) | ||
1231 | 119 | |||
1232 | 120 | def test_DELETE_rejects_deletion_if_not_permitted(self): | ||
1233 | 121 | storage = factory.make_Node( | ||
1234 | 122 | node_type=NODE_TYPE.STORAGE, owner=factory.make_User()) | ||
1235 | 123 | response = self.client.delete(get_storage_uri(storage)) | ||
1236 | 124 | self.assertEqual(http.client.FORBIDDEN, response.status_code) | ||
1237 | 125 | self.assertEqual(storage, reload_object(storage)) | ||
1238 | 0 | 126 | ||
1239 | === modified file 'src/maasserver/api/tests/test_subnets.py' | |||
1240 | --- src/maasserver/api/tests/test_subnets.py 2016-10-20 21:30:58 +0000 | |||
1241 | +++ src/maasserver/api/tests/test_subnets.py 2016-12-07 15:50:52 +0000 | |||
1242 | @@ -87,6 +87,7 @@ | |||
1243 | 87 | rdns_mode = factory.pick_choice(RDNS_MODE_CHOICES) | 87 | rdns_mode = factory.pick_choice(RDNS_MODE_CHOICES) |
1244 | 88 | allow_proxy = factory.pick_bool() | 88 | allow_proxy = factory.pick_bool() |
1245 | 89 | gateway_ip = factory.pick_ip_in_network(network) | 89 | gateway_ip = factory.pick_ip_in_network(network) |
1246 | 90 | managed = factory.pick_bool() | ||
1247 | 90 | dns_servers = [] | 91 | dns_servers = [] |
1248 | 91 | for _ in range(2): | 92 | for _ in range(2): |
1249 | 92 | dns_servers.append( | 93 | dns_servers.append( |
1250 | @@ -102,6 +103,7 @@ | |||
1251 | 102 | "dns_servers": ','.join(dns_servers), | 103 | "dns_servers": ','.join(dns_servers), |
1252 | 103 | "rdns_mode": rdns_mode, | 104 | "rdns_mode": rdns_mode, |
1253 | 104 | "allow_proxy": allow_proxy, | 105 | "allow_proxy": allow_proxy, |
1254 | 106 | "managed": managed, | ||
1255 | 105 | }) | 107 | }) |
1256 | 106 | self.assertEqual( | 108 | self.assertEqual( |
1257 | 107 | http.client.OK, response.status_code, response.content) | 109 | http.client.OK, response.status_code, response.content) |
1258 | @@ -115,6 +117,7 @@ | |||
1259 | 115 | self.assertEqual(dns_servers, created_subnet['dns_servers']) | 117 | self.assertEqual(dns_servers, created_subnet['dns_servers']) |
1260 | 116 | self.assertEqual(rdns_mode, created_subnet['rdns_mode']) | 118 | self.assertEqual(rdns_mode, created_subnet['rdns_mode']) |
1261 | 117 | self.assertEqual(allow_proxy, created_subnet['allow_proxy']) | 119 | self.assertEqual(allow_proxy, created_subnet['allow_proxy']) |
1262 | 120 | self.assertEqual(managed, created_subnet['managed']) | ||
1263 | 118 | 121 | ||
1264 | 119 | def test_create_defaults_to_allow_proxy(self): | 122 | def test_create_defaults_to_allow_proxy(self): |
1265 | 120 | self.become_admin() | 123 | self.become_admin() |
1266 | @@ -151,7 +154,43 @@ | |||
1267 | 151 | self.assertEqual(gateway_ip, created_subnet['gateway_ip']) | 154 | self.assertEqual(gateway_ip, created_subnet['gateway_ip']) |
1268 | 152 | self.assertEqual(dns_servers, created_subnet['dns_servers']) | 155 | self.assertEqual(dns_servers, created_subnet['dns_servers']) |
1269 | 153 | self.assertEqual(rdns_mode, created_subnet['rdns_mode']) | 156 | self.assertEqual(rdns_mode, created_subnet['rdns_mode']) |
1271 | 154 | self.assertEqual(True, created_subnet['allow_proxy']) | 157 | |
1272 | 158 | def test_create_defaults_to_managed(self): | ||
1273 | 159 | self.become_admin() | ||
1274 | 160 | subnet_name = factory.make_name("subnet") | ||
1275 | 161 | vlan = factory.make_VLAN() | ||
1276 | 162 | space = factory.make_Space() | ||
1277 | 163 | network = factory.make_ip4_or_6_network() | ||
1278 | 164 | cidr = str(network.cidr) | ||
1279 | 165 | rdns_mode = factory.pick_choice(RDNS_MODE_CHOICES) | ||
1280 | 166 | gateway_ip = factory.pick_ip_in_network(network) | ||
1281 | 167 | dns_servers = [] | ||
1282 | 168 | for _ in range(2): | ||
1283 | 169 | dns_servers.append( | ||
1284 | 170 | factory.pick_ip_in_network( | ||
1285 | 171 | network, but_not=[gateway_ip] + dns_servers)) | ||
1286 | 172 | uri = get_subnets_uri() | ||
1287 | 173 | response = self.client.post(uri, { | ||
1288 | 174 | "name": subnet_name, | ||
1289 | 175 | "vlan": vlan.id, | ||
1290 | 176 | "space": space.id, | ||
1291 | 177 | "cidr": cidr, | ||
1292 | 178 | "gateway_ip": gateway_ip, | ||
1293 | 179 | "dns_servers": ','.join(dns_servers), | ||
1294 | 180 | "rdns_mode": rdns_mode, | ||
1295 | 181 | }) | ||
1296 | 182 | self.assertEqual( | ||
1297 | 183 | http.client.OK, response.status_code, response.content) | ||
1298 | 184 | created_subnet = json.loads( | ||
1299 | 185 | response.content.decode(settings.DEFAULT_CHARSET)) | ||
1300 | 186 | self.assertEqual(subnet_name, created_subnet['name']) | ||
1301 | 187 | self.assertEqual(vlan.vid, created_subnet['vlan']['vid']) | ||
1302 | 188 | self.assertEqual(space.get_name(), created_subnet['space']) | ||
1303 | 189 | self.assertEqual(cidr, created_subnet['cidr']) | ||
1304 | 190 | self.assertEqual(gateway_ip, created_subnet['gateway_ip']) | ||
1305 | 191 | self.assertEqual(dns_servers, created_subnet['dns_servers']) | ||
1306 | 192 | self.assertEqual(rdns_mode, created_subnet['rdns_mode']) | ||
1307 | 193 | self.assertEqual(True, created_subnet['managed']) | ||
1308 | 155 | 194 | ||
1309 | 156 | def test_create_admin_only(self): | 195 | def test_create_admin_only(self): |
1310 | 157 | subnet_name = factory.make_name("subnet") | 196 | subnet_name = factory.make_name("subnet") |
1311 | @@ -200,6 +239,7 @@ | |||
1312 | 200 | "cidr": Equals(subnet.cidr), | 239 | "cidr": Equals(subnet.cidr), |
1313 | 201 | "gateway_ip": Equals(subnet.gateway_ip), | 240 | "gateway_ip": Equals(subnet.gateway_ip), |
1314 | 202 | "dns_servers": Equals(subnet.dns_servers), | 241 | "dns_servers": Equals(subnet.dns_servers), |
1315 | 242 | "managed": Equals(subnet.managed), | ||
1316 | 203 | })) | 243 | })) |
1317 | 204 | 244 | ||
1318 | 205 | def test_read_404_when_bad_id(self): | 245 | def test_read_404_when_bad_id(self): |
1319 | @@ -232,20 +272,24 @@ | |||
1320 | 232 | new_name = factory.make_name("subnet") | 272 | new_name = factory.make_name("subnet") |
1321 | 233 | new_rdns_mode = factory.pick_choice(RDNS_MODE_CHOICES) | 273 | new_rdns_mode = factory.pick_choice(RDNS_MODE_CHOICES) |
1322 | 234 | new_allow_proxy = factory.pick_bool() | 274 | new_allow_proxy = factory.pick_bool() |
1323 | 275 | new_managed = factory.pick_bool() | ||
1324 | 235 | uri = get_subnet_uri(subnet) | 276 | uri = get_subnet_uri(subnet) |
1325 | 236 | response = self.client.put(uri, { | 277 | response = self.client.put(uri, { |
1326 | 237 | "name": new_name, | 278 | "name": new_name, |
1327 | 238 | "rdns_mode": new_rdns_mode, | 279 | "rdns_mode": new_rdns_mode, |
1328 | 239 | "allow_proxy": new_allow_proxy, | 280 | "allow_proxy": new_allow_proxy, |
1329 | 281 | "managed": new_managed, | ||
1330 | 240 | }) | 282 | }) |
1331 | 241 | self.assertEqual( | 283 | self.assertEqual( |
1332 | 242 | http.client.OK, response.status_code, response.content) | 284 | http.client.OK, response.status_code, response.content) |
1333 | 243 | self.assertEqual( | 285 | self.assertEqual( |
1334 | 244 | new_name, json.loads( | 286 | new_name, json.loads( |
1335 | 245 | response.content.decode(settings.DEFAULT_CHARSET))['name']) | 287 | response.content.decode(settings.DEFAULT_CHARSET))['name']) |
1339 | 246 | self.assertEqual(new_name, reload_object(subnet).name) | 288 | subnet = reload_object(subnet) |
1340 | 247 | self.assertEqual(new_rdns_mode, reload_object(subnet).rdns_mode) | 289 | self.assertEqual(new_name, subnet.name) |
1341 | 248 | self.assertEqual(new_allow_proxy, reload_object(subnet).allow_proxy) | 290 | self.assertEqual(new_rdns_mode, subnet.rdns_mode) |
1342 | 291 | self.assertEqual(new_allow_proxy, subnet.allow_proxy) | ||
1343 | 292 | self.assertEqual(new_managed, subnet.managed) | ||
1344 | 249 | 293 | ||
1345 | 250 | def test_update_admin_only(self): | 294 | def test_update_admin_only(self): |
1346 | 251 | subnet = factory.make_Subnet() | 295 | subnet = factory.make_Subnet() |
1347 | 252 | 296 | ||
1348 | === modified file 'src/maasserver/api/tests/test_vlans.py' | |||
1349 | --- src/maasserver/api/tests/test_vlans.py 2016-05-24 21:29:53 +0000 | |||
1350 | +++ src/maasserver/api/tests/test_vlans.py 2016-12-07 15:50:52 +0000 | |||
1351 | @@ -84,6 +84,29 @@ | |||
1352 | 84 | self.assertEqual(vid, response_data['vid']) | 84 | self.assertEqual(vid, response_data['vid']) |
1353 | 85 | self.assertEqual(mtu, response_data['mtu']) | 85 | self.assertEqual(mtu, response_data['mtu']) |
1354 | 86 | 86 | ||
1355 | 87 | def test_create_with_relay_vlan(self): | ||
1356 | 88 | self.become_admin() | ||
1357 | 89 | fabric = factory.make_Fabric() | ||
1358 | 90 | vlan_name = factory.make_name("fabric") | ||
1359 | 91 | vid = random.randint(1, 1000) | ||
1360 | 92 | mtu = random.randint(552, 1500) | ||
1361 | 93 | relay_vlan = factory.make_VLAN() | ||
1362 | 94 | uri = get_vlans_uri(fabric) | ||
1363 | 95 | response = self.client.post(uri, { | ||
1364 | 96 | "name": vlan_name, | ||
1365 | 97 | "vid": vid, | ||
1366 | 98 | "mtu": mtu, | ||
1367 | 99 | "relay_vlan": relay_vlan.id, | ||
1368 | 100 | }) | ||
1369 | 101 | self.assertEqual( | ||
1370 | 102 | http.client.OK, response.status_code, response.content) | ||
1371 | 103 | response_data = json.loads( | ||
1372 | 104 | response.content.decode(settings.DEFAULT_CHARSET)) | ||
1373 | 105 | self.assertEqual(vlan_name, response_data['name']) | ||
1374 | 106 | self.assertEqual(vid, response_data['vid']) | ||
1375 | 107 | self.assertEqual(mtu, response_data['mtu']) | ||
1376 | 108 | self.assertEqual(relay_vlan.vid, response_data['relay_vlan']['vid']) | ||
1377 | 109 | |||
1378 | 87 | def test_create_admin_only(self): | 110 | def test_create_admin_only(self): |
1379 | 88 | fabric = factory.make_Fabric() | 111 | fabric = factory.make_Fabric() |
1380 | 89 | vlan_name = factory.make_name("fabric") | 112 | vlan_name = factory.make_name("fabric") |
1381 | @@ -182,6 +205,23 @@ | |||
1382 | 182 | self.assertEqual(new_vid, parsed_vlan['vid']) | 205 | self.assertEqual(new_vid, parsed_vlan['vid']) |
1383 | 183 | self.assertEqual(new_vid, vlan.vid) | 206 | self.assertEqual(new_vid, vlan.vid) |
1384 | 184 | 207 | ||
1385 | 208 | def test_update_sets_relay_vlan(self): | ||
1386 | 209 | self.become_admin() | ||
1387 | 210 | fabric = factory.make_Fabric() | ||
1388 | 211 | vlan = factory.make_VLAN(fabric=fabric) | ||
1389 | 212 | uri = get_vlan_uri(vlan) | ||
1390 | 213 | relay_vlan = factory.make_VLAN() | ||
1391 | 214 | response = self.client.put(uri, { | ||
1392 | 215 | "relay_vlan": relay_vlan.id, | ||
1393 | 216 | }) | ||
1394 | 217 | self.assertEqual( | ||
1395 | 218 | http.client.OK, response.status_code, response.content) | ||
1396 | 219 | parsed_vlan = json.loads( | ||
1397 | 220 | response.content.decode(settings.DEFAULT_CHARSET)) | ||
1398 | 221 | vlan = reload_object(vlan) | ||
1399 | 222 | self.assertEqual(relay_vlan.vid, parsed_vlan['relay_vlan']['vid']) | ||
1400 | 223 | self.assertEqual(relay_vlan, vlan.relay_vlan) | ||
1401 | 224 | |||
1402 | 185 | def test_update_with_fabric(self): | 225 | def test_update_with_fabric(self): |
1403 | 186 | self.become_admin() | 226 | self.become_admin() |
1404 | 187 | fabric = factory.make_Fabric() | 227 | fabric = factory.make_Fabric() |
1405 | 188 | 228 | ||
1406 | === modified file 'src/maasserver/api/vlans.py' | |||
1407 | --- src/maasserver/api/vlans.py 2016-04-27 20:40:24 +0000 | |||
1408 | +++ src/maasserver/api/vlans.py 2016-12-07 15:50:52 +0000 | |||
1409 | @@ -26,6 +26,7 @@ | |||
1410 | 26 | 'secondary_rack', | 26 | 'secondary_rack', |
1411 | 27 | 'dhcp_on', | 27 | 'dhcp_on', |
1412 | 28 | 'external_dhcp', | 28 | 'external_dhcp', |
1413 | 29 | 'relay_vlan', | ||
1414 | 29 | ) | 30 | ) |
1415 | 30 | 31 | ||
1416 | 31 | 32 | ||
1417 | @@ -165,12 +166,18 @@ | |||
1418 | 165 | :type vid: integer | 166 | :type vid: integer |
1419 | 166 | :param mtu: The MTU to use on the VLAN. | 167 | :param mtu: The MTU to use on the VLAN. |
1420 | 167 | :type mtu: integer | 168 | :type mtu: integer |
1422 | 168 | :Param dhcp_on: Whether or not DHCP should be managed on the VLAN. | 169 | :param dhcp_on: Whether or not DHCP should be managed on the VLAN. |
1423 | 169 | :type dhcp_on: boolean | 170 | :type dhcp_on: boolean |
1424 | 170 | :param primary_rack: The primary rack controller managing the VLAN. | 171 | :param primary_rack: The primary rack controller managing the VLAN. |
1425 | 171 | :type primary_rack: system_id | 172 | :type primary_rack: system_id |
1426 | 172 | :param secondary_rack: The secondary rack controller manging the VLAN. | 173 | :param secondary_rack: The secondary rack controller manging the VLAN. |
1427 | 173 | :type secondary_rack: system_id | 174 | :type secondary_rack: system_id |
1428 | 175 | :param relay_vlan: Only set when this VLAN will be using a DHCP relay | ||
1429 | 176 | to forward DHCP requests to another VLAN that MAAS is or will run | ||
1430 | 177 | the DHCP server. MAAS will not run the DHCP relay itself, it must | ||
1431 | 178 | be configured to proxy reqests to the primary and/or secondary | ||
1432 | 179 | rack controller interfaces for the VLAN specified in this field. | ||
1433 | 180 | :type relay_vlan: ID of VLAN | ||
1434 | 174 | 181 | ||
1435 | 175 | Returns 404 if the fabric or VLAN is not found. | 182 | Returns 404 if the fabric or VLAN is not found. |
1436 | 176 | """ | 183 | """ |
1437 | 177 | 184 | ||
1438 | === modified file 'src/maasserver/bootresources.py' | |||
1439 | --- src/maasserver/bootresources.py 2016-10-28 15:58:32 +0000 | |||
1440 | +++ src/maasserver/bootresources.py 2016-12-07 15:50:52 +0000 | |||
1441 | @@ -60,6 +60,7 @@ | |||
1442 | 60 | BootResourceSet, | 60 | BootResourceSet, |
1443 | 61 | BootSourceSelection, | 61 | BootSourceSelection, |
1444 | 62 | Config, | 62 | Config, |
1445 | 63 | Event, | ||
1446 | 63 | LargeFile, | 64 | LargeFile, |
1447 | 64 | ) | 65 | ) |
1448 | 65 | from maasserver.rpc import getAllClients | 66 | from maasserver.rpc import getAllClients |
1449 | @@ -78,6 +79,7 @@ | |||
1450 | 78 | from maasserver.utils.threads import deferToDatabase | 79 | from maasserver.utils.threads import deferToDatabase |
1451 | 79 | from maasserver.utils.version import get_maas_version_ui | 80 | from maasserver.utils.version import get_maas_version_ui |
1452 | 80 | from provisioningserver.config import is_dev_environment | 81 | from provisioningserver.config import is_dev_environment |
1453 | 82 | from provisioningserver.events import EVENT_TYPES | ||
1454 | 81 | from provisioningserver.import_images.download_descriptions import ( | 83 | from provisioningserver.import_images.download_descriptions import ( |
1455 | 82 | download_all_image_descriptions, | 84 | download_all_image_descriptions, |
1456 | 83 | image_passes_filter, | 85 | image_passes_filter, |
1457 | @@ -661,10 +663,13 @@ | |||
1458 | 661 | # not allowed. | 663 | # not allowed. |
1459 | 662 | prev_largefile = largefile | 664 | prev_largefile = largefile |
1460 | 663 | largefile = None | 665 | largefile = None |
1462 | 664 | maaslog.warning( | 666 | msg = ( |
1463 | 665 | "Hash mismatch for prev_file=%s resourceset=%s " | 667 | "Hash mismatch for prev_file=%s resourceset=%s " |
1466 | 666 | "resource=%s", | 668 | "resource=%s" % (prev_largefile, resource_set, resource) |
1467 | 667 | prev_largefile, resource_set, resource) | 669 | ) |
1468 | 670 | Event.objects.create_region_event( | ||
1469 | 671 | EVENT_TYPES.REGION_IMPORT_WARNING, msg) | ||
1470 | 672 | maaslog.warning(msg) | ||
1471 | 668 | 673 | ||
1472 | 669 | if largefile is None: | 674 | if largefile is None: |
1473 | 670 | # The resource file current does not have a largefile linked. Lets | 675 | # The resource file current does not have a largefile linked. Lets |
1474 | @@ -695,8 +700,10 @@ | |||
1475 | 695 | is_resource_initially_complete and | 700 | is_resource_initially_complete and |
1476 | 696 | resource.get_latest_complete_set() is None) | 701 | resource.get_latest_complete_set() is None) |
1477 | 697 | if is_resource_broken: | 702 | if is_resource_broken: |
1480 | 698 | maaslog.error( | 703 | msg = "Resource %s has no complete resource set!" % resource |
1481 | 699 | "Resource %s has no complete resource set!", resource) | 704 | Event.objects.create_region_event( |
1482 | 705 | EVENT_TYPES.REGION_IMPORT_ERROR, msg) | ||
1483 | 706 | maaslog.error(msg) | ||
1484 | 700 | 707 | ||
1485 | 701 | if prev_largefile is not None: | 708 | if prev_largefile is not None: |
1486 | 702 | # If the previous largefile had a miss matching sha256 then it | 709 | # If the previous largefile had a miss matching sha256 then it |
1487 | @@ -773,11 +780,15 @@ | |||
1488 | 773 | # Calculated sha256 hash from the data does not match, what | 780 | # Calculated sha256 hash from the data does not match, what |
1489 | 774 | # simplestreams is telling us it should be. This resource file | 781 | # simplestreams is telling us it should be. This resource file |
1490 | 775 | # will be deleted since it is corrupt. | 782 | # will be deleted since it is corrupt. |
1492 | 776 | maaslog.error( | 783 | msg = ( |
1493 | 777 | "Failed to finalize boot image %s. Unexpected " | 784 | "Failed to finalize boot image %s. Unexpected " |
1497 | 778 | "checksum '%s' (found: %s expected: %s)", | 785 | "checksum '%s' (found: %s expected: %s)" % |
1498 | 779 | ident, cksummer.algorithm, | 786 | ( |
1499 | 780 | cksummer.hexdigest(), cksummer.expected) | 787 | ident, cksummer.algorithm, cksummer.hexdigest(), |
1500 | 788 | cksummer.expected)) | ||
1501 | 789 | Event.objects.create_region_event( | ||
1502 | 790 | EVENT_TYPES.REGION_IMPORT_ERROR, msg) | ||
1503 | 791 | maaslog.error(msg) | ||
1504 | 781 | transactional(rfile.delete)() | 792 | transactional(rfile.delete)() |
1505 | 782 | else: | 793 | else: |
1506 | 783 | maaslog.debug('Finalized boot image %s.', ident) | 794 | maaslog.debug('Finalized boot image %s.', ident) |
1507 | @@ -877,11 +888,15 @@ | |||
1508 | 877 | self.get_resource_identity(delete_resource)) | 888 | self.get_resource_identity(delete_resource)) |
1509 | 878 | delete_resource.delete() | 889 | delete_resource.delete() |
1510 | 879 | else: | 890 | else: |
1512 | 880 | maaslog.info( | 891 | msg = ( |
1513 | 881 | "Boot image %s no longer exists in stream, but " | 892 | "Boot image %s no longer exists in stream, but " |
1514 | 882 | "remains in selections. To delete this image " | 893 | "remains in selections. To delete this image " |
1517 | 883 | "remove its selection.", | 894 | "remove its selection." % |
1518 | 884 | self.get_resource_identity(delete_resource)) | 895 | self.get_resource_identity(delete_resource) |
1519 | 896 | ) | ||
1520 | 897 | Event.objects.create_region_event( | ||
1521 | 898 | EVENT_TYPES.REGION_IMPORT_INFO, msg) | ||
1522 | 899 | maaslog.info(msg) | ||
1523 | 885 | else: | 900 | else: |
1524 | 886 | # No resource set on the boot resource so it should be | 901 | # No resource set on the boot resource so it should be |
1525 | 887 | # removed as it has not files. | 902 | # removed as it has not files. |
1526 | @@ -960,6 +975,8 @@ | |||
1527 | 960 | "Finalization of imported images skipped, " | 975 | "Finalization of imported images skipped, " |
1528 | 961 | "or all %s synced images would be deleted." % ( | 976 | "or all %s synced images would be deleted." % ( |
1529 | 962 | self._resources_to_delete)) | 977 | self._resources_to_delete)) |
1530 | 978 | Event.objects.create_region_event( | ||
1531 | 979 | EVENT_TYPES.REGION_IMPORT_ERROR, error_msg) | ||
1532 | 963 | maaslog.error(error_msg) | 980 | maaslog.error(error_msg) |
1533 | 964 | if notify is not None: | 981 | if notify is not None: |
1534 | 965 | failure = Failure(Exception(error_msg)) | 982 | failure = Failure(Exception(error_msg)) |
1535 | @@ -1192,7 +1209,10 @@ | |||
1536 | 1192 | 1209 | ||
1537 | 1193 | # Download all of the metadata first. | 1210 | # Download all of the metadata first. |
1538 | 1194 | for source in sources: | 1211 | for source in sources: |
1540 | 1195 | maaslog.info("Importing images from source: %s", source['url']) | 1212 | msg = "Importing images from source: %s" % source['url'] |
1541 | 1213 | Event.objects.create_region_event( | ||
1542 | 1214 | EVENT_TYPES.REGION_IMPORT_INFO, msg) | ||
1543 | 1215 | maaslog.info(msg) | ||
1544 | 1196 | download_boot_resources( | 1216 | download_boot_resources( |
1545 | 1197 | source['url'], store, product_mapping, | 1217 | source['url'], store, product_mapping, |
1546 | 1198 | keyring_file=source.get('keyring')) | 1218 | keyring_file=source.get('keyring')) |
1547 | @@ -1318,15 +1338,21 @@ | |||
1548 | 1318 | with tempdir('keyrings') as keyrings_path: | 1338 | with tempdir('keyrings') as keyrings_path: |
1549 | 1319 | sources = get_boot_sources() | 1339 | sources = get_boot_sources() |
1550 | 1320 | sources = write_all_keyrings(keyrings_path, sources) | 1340 | sources = write_all_keyrings(keyrings_path, sources) |
1553 | 1321 | maaslog.info( | 1341 | msg = ( |
1554 | 1322 | "Started importing of boot images from %d source(s).", | 1342 | "Started importing of boot images from %d source(s)." % |
1555 | 1323 | len(sources)) | 1343 | len(sources)) |
1556 | 1344 | Event.objects.create_region_event(EVENT_TYPES.REGION_IMPORT_INFO, msg) | ||
1557 | 1345 | maaslog.info(msg) | ||
1558 | 1324 | 1346 | ||
1559 | 1325 | image_descriptions = download_all_image_descriptions(sources) | 1347 | image_descriptions = download_all_image_descriptions(sources) |
1560 | 1326 | if image_descriptions.is_empty(): | 1348 | if image_descriptions.is_empty(): |
1562 | 1327 | maaslog.warning( | 1349 | msg = ( |
1563 | 1328 | "Unable to import boot images, no image " | 1350 | "Unable to import boot images, no image " |
1565 | 1329 | "descriptions avaliable.") | 1351 | "descriptions avaliable." |
1566 | 1352 | ) | ||
1567 | 1353 | Event.objects.create_region_event( | ||
1568 | 1354 | EVENT_TYPES.REGION_IMPORT_WARNING, msg) | ||
1569 | 1355 | maaslog.warning(msg) | ||
1570 | 1330 | return | 1356 | return |
1571 | 1331 | product_mapping = map_products(image_descriptions) | 1357 | product_mapping = map_products(image_descriptions) |
1572 | 1332 | 1358 | ||
1573 | 1333 | 1359 | ||
1574 | === modified file 'src/maasserver/clusterrpc/power_parameters.py' | |||
1575 | --- src/maasserver/clusterrpc/power_parameters.py 2016-10-20 19:41:25 +0000 | |||
1576 | +++ src/maasserver/clusterrpc/power_parameters.py 2016-12-07 15:50:52 +0000 | |||
1577 | @@ -14,7 +14,7 @@ | |||
1578 | 14 | power type with a set of power parameters. | 14 | power type with a set of power parameters. |
1579 | 15 | 15 | ||
1580 | 16 | The power types are retrieved from the cluster controllers using the json | 16 | The power types are retrieved from the cluster controllers using the json |
1582 | 17 | schema provisioningserver.power_schema.JSON_POWER_TYPE_SCHEMA. To add new | 17 | schema provisioningserver.drivers.power.JSON_POWER_DRIVERS_SCHEMA. To add new |
1583 | 18 | parameters requires changes to hardware drivers that run in the cluster | 18 | parameters requires changes to hardware drivers that run in the cluster |
1584 | 19 | controllers. | 19 | controllers. |
1585 | 20 | """ | 20 | """ |
1586 | @@ -33,10 +33,8 @@ | |||
1587 | 33 | from maasserver.config_forms import DictCharField | 33 | from maasserver.config_forms import DictCharField |
1588 | 34 | from maasserver.fields import MACAddressFormField | 34 | from maasserver.fields import MACAddressFormField |
1589 | 35 | from maasserver.utils.forms import compose_invalid_choice_text | 35 | from maasserver.utils.forms import compose_invalid_choice_text |
1594 | 36 | from provisioningserver.power.schema import ( | 36 | from provisioningserver.drivers import SETTING_PARAMETER_FIELD_SCHEMA |
1595 | 37 | JSON_POWER_TYPE_SCHEMA, | 37 | from provisioningserver.drivers.power import JSON_POWER_DRIVERS_SCHEMA |
1592 | 38 | POWER_TYPE_PARAMETER_FIELD_SCHEMA, | ||
1593 | 39 | ) | ||
1596 | 40 | from provisioningserver.rpc import cluster | 38 | from provisioningserver.rpc import cluster |
1597 | 41 | 39 | ||
1598 | 42 | 40 | ||
1599 | @@ -93,10 +91,10 @@ | |||
1600 | 93 | :type description: string | 91 | :type description: string |
1601 | 94 | :param fields: The fields that make up the parameters for the power | 92 | :param fields: The fields that make up the parameters for the power |
1602 | 95 | type. Will be validated against | 93 | type. Will be validated against |
1604 | 96 | POWER_TYPE_PARAMETER_FIELD_SCHEMA. | 94 | SETTING_PARAMETER_FIELD_SCHEMA. |
1605 | 97 | :param missing_packages: System packages that must be installed on | 95 | :param missing_packages: System packages that must be installed on |
1606 | 98 | the cluster before the power type can be used. | 96 | the cluster before the power type can be used. |
1608 | 99 | :type fields: list of `make_json_field` results. | 97 | :type fields: list of `make_setting_field` results. |
1609 | 100 | :param parameters_set: An existing list of power type parameters to | 98 | :param parameters_set: An existing list of power type parameters to |
1610 | 101 | mutate. | 99 | mutate. |
1611 | 102 | :type parameters_set: list | 100 | :type parameters_set: list |
1612 | @@ -107,7 +105,7 @@ | |||
1613 | 107 | field_set_schema = { | 105 | field_set_schema = { |
1614 | 108 | 'title': "Power type parameters field set schema", | 106 | 'title': "Power type parameters field set schema", |
1615 | 109 | 'type': 'array', | 107 | 'type': 'array', |
1617 | 110 | 'items': POWER_TYPE_PARAMETER_FIELD_SCHEMA, | 108 | 'items': SETTING_PARAMETER_FIELD_SCHEMA, |
1618 | 111 | } | 109 | } |
1619 | 112 | validate(fields, field_set_schema) | 110 | validate(fields, field_set_schema) |
1620 | 113 | parameters_set.append( | 111 | parameters_set.append( |
1621 | @@ -132,7 +130,7 @@ | |||
1622 | 132 | :return: A dict of power parameters for all power types, indexed by | 130 | :return: A dict of power parameters for all power types, indexed by |
1623 | 133 | power type name. | 131 | power type name. |
1624 | 134 | """ | 132 | """ |
1626 | 135 | validate(json_power_type_parameters, JSON_POWER_TYPE_SCHEMA) | 133 | validate(json_power_type_parameters, JSON_POWER_DRIVERS_SCHEMA) |
1627 | 136 | power_parameters = { | 134 | power_parameters = { |
1628 | 137 | # Empty type, for the case where nothing is entered in the form yet. | 135 | # Empty type, for the case where nothing is entered in the form yet. |
1629 | 138 | '': DictCharField( | 136 | '': DictCharField( |
1630 | @@ -197,7 +195,7 @@ | |||
1631 | 197 | """Query every cluster controller and obtain all known power types. | 195 | """Query every cluster controller and obtain all known power types. |
1632 | 198 | 196 | ||
1633 | 199 | :return: a list of power types matching the schema | 197 | :return: a list of power types matching the schema |
1635 | 200 | provisioningserver.power_schema.JSON_POWER_TYPE_PARAMETERS_SCHEMA | 198 | provisioningserver.drivers.power.JSON_POWER_DRIVERS_SCHEMA |
1636 | 201 | """ | 199 | """ |
1637 | 202 | merged_types = [] | 200 | merged_types = [] |
1638 | 203 | responses = call_clusters( | 201 | responses = call_clusters( |
1639 | 204 | 202 | ||
1640 | === modified file 'src/maasserver/clusterrpc/testing/power_parameters.py' | |||
1641 | --- src/maasserver/clusterrpc/testing/power_parameters.py 2016-06-22 17:03:02 +0000 | |||
1642 | +++ src/maasserver/clusterrpc/testing/power_parameters.py 2016-12-07 15:50:52 +0000 | |||
1643 | @@ -11,7 +11,7 @@ | |||
1644 | 11 | 11 | ||
1645 | 12 | from fixtures import Fixture | 12 | from fixtures import Fixture |
1646 | 13 | from maasserver.clusterrpc import power_parameters | 13 | from maasserver.clusterrpc import power_parameters |
1648 | 14 | from provisioningserver.power import schema | 14 | from provisioningserver.drivers.power import PowerDriverRegistry |
1649 | 15 | from testtools import monkey | 15 | from testtools import monkey |
1650 | 16 | 16 | ||
1651 | 17 | 17 | ||
1652 | @@ -26,7 +26,9 @@ | |||
1653 | 26 | super(StaticPowerTypesFixture, self).setUp() | 26 | super(StaticPowerTypesFixture, self).setUp() |
1654 | 27 | # This patch prevents communication with a non-existent cluster | 27 | # This patch prevents communication with a non-existent cluster |
1655 | 28 | # controller when fetching power types. | 28 | # controller when fetching power types. |
1656 | 29 | power_types = PowerDriverRegistry.get_schema( | ||
1657 | 30 | detect_missing_packages=False) | ||
1658 | 29 | restore = monkey.patch( | 31 | restore = monkey.patch( |
1659 | 30 | power_parameters, 'get_all_power_types_from_clusters', | 32 | power_parameters, 'get_all_power_types_from_clusters', |
1661 | 31 | Mock(return_value=schema.JSON_POWER_TYPE_PARAMETERS)) | 33 | Mock(return_value=power_types)) |
1662 | 32 | self.addCleanup(restore) | 34 | self.addCleanup(restore) |
1663 | 33 | 35 | ||
1664 | === modified file 'src/maasserver/clusterrpc/tests/test_power_parameters.py' | |||
1665 | --- src/maasserver/clusterrpc/tests/test_power_parameters.py 2016-10-20 08:41:30 +0000 | |||
1666 | +++ src/maasserver/clusterrpc/tests/test_power_parameters.py 2016-12-07 15:50:52 +0000 | |||
1667 | @@ -14,9 +14,9 @@ | |||
1668 | 14 | add_power_type_parameters, | 14 | add_power_type_parameters, |
1669 | 15 | get_power_type_parameters_from_json, | 15 | get_power_type_parameters_from_json, |
1670 | 16 | get_power_types, | 16 | get_power_types, |
1672 | 17 | JSON_POWER_TYPE_SCHEMA, | 17 | JSON_POWER_DRIVERS_SCHEMA, |
1673 | 18 | make_form_field, | 18 | make_form_field, |
1675 | 19 | POWER_TYPE_PARAMETER_FIELD_SCHEMA, | 19 | SETTING_PARAMETER_FIELD_SCHEMA, |
1676 | 20 | ) | 20 | ) |
1677 | 21 | from maasserver.config_forms import DictCharField | 21 | from maasserver.config_forms import DictCharField |
1678 | 22 | from maasserver.fields import MACAddressFormField | 22 | from maasserver.fields import MACAddressFormField |
1679 | @@ -25,7 +25,7 @@ | |||
1680 | 25 | from maasserver.utils.forms import compose_invalid_choice_text | 25 | from maasserver.utils.forms import compose_invalid_choice_text |
1681 | 26 | from maastesting.matchers import MockCalledOnceWith | 26 | from maastesting.matchers import MockCalledOnceWith |
1682 | 27 | from maastesting.testcase import MAASTestCase | 27 | from maastesting.testcase import MAASTestCase |
1684 | 28 | from provisioningserver.power.schema import make_json_field | 28 | from provisioningserver.drivers import make_setting_field |
1685 | 29 | 29 | ||
1686 | 30 | 30 | ||
1687 | 31 | class TestGetPowerTypeParametersFromJSON(MAASServerTestCase): | 31 | class TestGetPowerTypeParametersFromJSON(MAASServerTestCase): |
1688 | @@ -186,15 +186,15 @@ | |||
1689 | 186 | self.assertEquals(json_field['default'], django_field.initial) | 186 | self.assertEquals(json_field['default'], django_field.initial) |
1690 | 187 | 187 | ||
1691 | 188 | 188 | ||
1694 | 189 | class TestMakeJSONField(MAASServerTestCase): | 189 | class TestMakeSettingField(MAASServerTestCase): |
1695 | 190 | """Test that make_json_field() creates JSON-verifiable fields.""" | 190 | """Test that make_setting_field() creates JSON-verifiable fields.""" |
1696 | 191 | 191 | ||
1697 | 192 | def test__returns_json_verifiable_dict(self): | 192 | def test__returns_json_verifiable_dict(self): |
1700 | 193 | json_field = make_json_field('some_field', 'Some Label') | 193 | json_field = make_setting_field('some_field', 'Some Label') |
1701 | 194 | jsonschema.validate(json_field, POWER_TYPE_PARAMETER_FIELD_SCHEMA) | 194 | jsonschema.validate(json_field, SETTING_PARAMETER_FIELD_SCHEMA) |
1702 | 195 | 195 | ||
1703 | 196 | def test__provides_sane_default_values(self): | 196 | def test__provides_sane_default_values(self): |
1705 | 197 | json_field = make_json_field('some_field', 'Some Label') | 197 | json_field = make_setting_field('some_field', 'Some Label') |
1706 | 198 | expected_field = { | 198 | expected_field = { |
1707 | 199 | 'name': 'some_field', | 199 | 'name': 'some_field', |
1708 | 200 | 'label': 'Some Label', | 200 | 'label': 'Some Label', |
1709 | @@ -219,16 +219,16 @@ | |||
1710 | 219 | 'default': 'spam', | 219 | 'default': 'spam', |
1711 | 220 | 'scope': 'bmc', | 220 | 'scope': 'bmc', |
1712 | 221 | } | 221 | } |
1714 | 222 | json_field = make_json_field(**expected_field) | 222 | json_field = make_setting_field(**expected_field) |
1715 | 223 | self.assertEqual(expected_field, json_field) | 223 | self.assertEqual(expected_field, json_field) |
1716 | 224 | 224 | ||
1717 | 225 | def test__validates_choices(self): | 225 | def test__validates_choices(self): |
1718 | 226 | self.assertRaises( | 226 | self.assertRaises( |
1720 | 227 | jsonschema.ValidationError, make_json_field, | 227 | jsonschema.ValidationError, make_setting_field, |
1721 | 228 | 'some_field', 'Some Label', choices="Nonsense") | 228 | 'some_field', 'Some Label', choices="Nonsense") |
1722 | 229 | 229 | ||
1723 | 230 | def test__creates_password_fields(self): | 230 | def test__creates_password_fields(self): |
1725 | 231 | json_field = make_json_field( | 231 | json_field = make_setting_field( |
1726 | 232 | 'some_field', 'Some Label', field_type='password') | 232 | 'some_field', 'Some Label', field_type='password') |
1727 | 233 | expected_field = { | 233 | expected_field = { |
1728 | 234 | 'name': 'some_field', | 234 | 'name': 'some_field', |
1729 | @@ -245,7 +245,7 @@ | |||
1730 | 245 | class TestAddPowerTypeParameters(MAASServerTestCase): | 245 | class TestAddPowerTypeParameters(MAASServerTestCase): |
1731 | 246 | 246 | ||
1732 | 247 | def make_field(self): | 247 | def make_field(self): |
1734 | 248 | return make_json_field( | 248 | return make_setting_field( |
1735 | 249 | self.getUniqueString(), self.getUniqueString()) | 249 | self.getUniqueString(), self.getUniqueString()) |
1736 | 250 | 250 | ||
1737 | 251 | def test_adding_existing_types_is_a_no_op(self): | 251 | def test_adding_existing_types_is_a_no_op(self): |
1738 | @@ -289,7 +289,7 @@ | |||
1739 | 289 | missing_packages=[], | 289 | missing_packages=[], |
1740 | 290 | parameters_set=parameters_set) | 290 | parameters_set=parameters_set) |
1741 | 291 | jsonschema.validate( | 291 | jsonschema.validate( |
1743 | 292 | parameters_set, JSON_POWER_TYPE_SCHEMA) | 292 | parameters_set, JSON_POWER_DRIVERS_SCHEMA) |
1744 | 293 | 293 | ||
1745 | 294 | 294 | ||
1746 | 295 | class TestPowerTypes(MAASTestCase): | 295 | class TestPowerTypes(MAASTestCase): |
1747 | 296 | 296 | ||
1748 | === modified file 'src/maasserver/dhcp.py' | |||
1749 | --- src/maasserver/dhcp.py 2016-11-01 16:46:19 +0000 | |||
1750 | +++ src/maasserver/dhcp.py 2016-12-07 15:50:52 +0000 | |||
1751 | @@ -29,10 +29,7 @@ | |||
1752 | 29 | IPRANGE_TYPE, | 29 | IPRANGE_TYPE, |
1753 | 30 | SERVICE_STATUS, | 30 | SERVICE_STATUS, |
1754 | 31 | ) | 31 | ) |
1759 | 32 | from maasserver.exceptions import ( | 32 | from maasserver.exceptions import UnresolvableHost |
1756 | 33 | DHCPConfigurationError, | ||
1757 | 34 | UnresolvableHost, | ||
1758 | 35 | ) | ||
1760 | 36 | from maasserver.models import ( | 33 | from maasserver.models import ( |
1761 | 37 | Config, | 34 | Config, |
1762 | 38 | DHCPSnippet, | 35 | DHCPSnippet, |
1763 | @@ -40,6 +37,7 @@ | |||
1764 | 40 | RackController, | 37 | RackController, |
1765 | 41 | Service, | 38 | Service, |
1766 | 42 | StaticIPAddress, | 39 | StaticIPAddress, |
1767 | 40 | Subnet, | ||
1768 | 43 | ) | 41 | ) |
1769 | 44 | from maasserver.rpc import ( | 42 | from maasserver.rpc import ( |
1770 | 45 | getAllClients, | 43 | getAllClients, |
1771 | @@ -88,14 +86,14 @@ | |||
1772 | 88 | return key | 86 | return key |
1773 | 89 | 87 | ||
1774 | 90 | 88 | ||
1776 | 91 | def split_ipv4_ipv6_subnets(subnets): | 89 | def split_managed_ipv4_ipv6_subnets(subnets: Iterable[Subnet]): |
1777 | 92 | """Divide `subnets` into IPv4 ones and IPv6 ones. | 90 | """Divide `subnets` into IPv4 ones and IPv6 ones. |
1778 | 93 | 91 | ||
1779 | 94 | :param subnets: A sequence of subnets. | 92 | :param subnets: A sequence of subnets. |
1780 | 95 | :return: A tuple of two separate sequences: IPv4 subnets and IPv6 subnets. | 93 | :return: A tuple of two separate sequences: IPv4 subnets and IPv6 subnets. |
1781 | 96 | """ | 94 | """ |
1782 | 97 | split = defaultdict(list) | 95 | split = defaultdict(list) |
1784 | 98 | for subnet in subnets: | 96 | for subnet in (s for s in subnets if s.managed is True): |
1785 | 99 | split[subnet.get_ipnetwork().version].append(subnet) | 97 | split[subnet.get_ipnetwork().version].append(subnet) |
1786 | 100 | assert len(split) <= 2, ( | 98 | assert len(split) <= 2, ( |
1787 | 101 | "Unexpected IP version(s): %s" % ', '.join(list(split.keys()))) | 99 | "Unexpected IP version(s): %s" % ', '.join(list(split.keys()))) |
1788 | @@ -193,18 +191,19 @@ | |||
1789 | 193 | return [] | 191 | return [] |
1790 | 194 | 192 | ||
1791 | 195 | 193 | ||
1794 | 196 | def get_managed_vlans_for(rack_controller): | 194 | def gen_managed_vlans_for(rack_controller): |
1795 | 197 | """Return list of `VLAN` for the `rack_controller` when DHCP is enabled and | 195 | """Yeilds each `VLAN` for the `rack_controller` when DHCP is enabled and |
1796 | 198 | `rack_controller` is either the `primary_rack` or the `secondary_rack`. | 196 | `rack_controller` is either the `primary_rack` or the `secondary_rack`. |
1797 | 199 | """ | 197 | """ |
1798 | 200 | interfaces = rack_controller.interface_set.filter( | 198 | interfaces = rack_controller.interface_set.filter( |
1799 | 201 | Q(vlan__dhcp_on=True) & ( | 199 | Q(vlan__dhcp_on=True) & ( |
1800 | 202 | Q(vlan__primary_rack=rack_controller) | | 200 | Q(vlan__primary_rack=rack_controller) | |
1806 | 203 | Q(vlan__secondary_rack=rack_controller))).select_related("vlan") | 201 | Q(vlan__secondary_rack=rack_controller))) |
1807 | 204 | return { | 202 | interfaces = interfaces.prefetch_related("vlan__relay_vlans") |
1808 | 205 | interface.vlan | 203 | for interface in interfaces: |
1809 | 206 | for interface in interfaces | 204 | yield interface.vlan |
1810 | 207 | } | 205 | for relayed_vlan in interface.vlan.relay_vlans.all(): |
1811 | 206 | yield relayed_vlan | ||
1812 | 208 | 207 | ||
1813 | 209 | 208 | ||
1814 | 210 | def ip_is_on_vlan(ip_address, vlan): | 209 | def ip_is_on_vlan(ip_address, vlan): |
1815 | @@ -459,12 +458,6 @@ | |||
1816 | 459 | interfaces = get_interfaces_with_ip_on_vlan( | 458 | interfaces = get_interfaces_with_ip_on_vlan( |
1817 | 460 | rack_controller, vlan, ip_version) | 459 | rack_controller, vlan, ip_version) |
1818 | 461 | interface = get_best_interface(interfaces) | 460 | interface = get_best_interface(interfaces) |
1819 | 462 | if interface is None: | ||
1820 | 463 | raise DHCPConfigurationError( | ||
1821 | 464 | "No IPv%d interface on rack controller '%s' has an IP address on " | ||
1822 | 465 | "any subnet on VLAN '%s.%d'." % ( | ||
1823 | 466 | ip_version, rack_controller.hostname, vlan.fabric.name, | ||
1824 | 467 | vlan.vid)) | ||
1825 | 468 | 461 | ||
1826 | 469 | # Generate the failover peer for this VLAN. | 462 | # Generate the failover peer for this VLAN. |
1827 | 470 | if vlan.secondary_rack_id is not None: | 463 | if vlan.secondary_rack_id is not None: |
1828 | @@ -496,7 +489,7 @@ | |||
1829 | 496 | hosts = make_hosts_for_subnets(subnets, nodes_dhcp_snippets) | 489 | hosts = make_hosts_for_subnets(subnets, nodes_dhcp_snippets) |
1830 | 497 | return ( | 490 | return ( |
1831 | 498 | peer_config, sorted(subnet_configs, key=itemgetter("subnet")), | 491 | peer_config, sorted(subnet_configs, key=itemgetter("subnet")), |
1833 | 499 | hosts, interface.name) | 492 | hosts, None if interface is None else interface.name) |
1834 | 500 | 493 | ||
1835 | 501 | 494 | ||
1836 | 502 | @synchronous | 495 | @synchronous |
1837 | @@ -505,11 +498,11 @@ | |||
1838 | 505 | """Return tuple with IPv4 and IPv6 configurations for the | 498 | """Return tuple with IPv4 and IPv6 configurations for the |
1839 | 506 | rack controller.""" | 499 | rack controller.""" |
1840 | 507 | # Get list of all vlans that are being managed by the rack controller. | 500 | # Get list of all vlans that are being managed by the rack controller. |
1842 | 508 | vlans = get_managed_vlans_for(rack_controller) | 501 | vlans = gen_managed_vlans_for(rack_controller) |
1843 | 509 | 502 | ||
1844 | 510 | # Group the subnets on each VLAN into IPv4 and IPv6 subnets. | 503 | # Group the subnets on each VLAN into IPv4 and IPv6 subnets. |
1845 | 511 | vlan_subnets = { | 504 | vlan_subnets = { |
1847 | 512 | vlan: split_ipv4_ipv6_subnets(vlan.subnet_set.all()) | 505 | vlan: split_managed_ipv4_ipv6_subnets(vlan.subnet_set.all()) |
1848 | 513 | for vlan in vlans | 506 | for vlan in vlans |
1849 | 514 | } | 507 | } |
1850 | 515 | 508 | ||
1851 | @@ -561,52 +554,40 @@ | |||
1852 | 561 | for vlan, (subnets_v4, subnets_v6) in vlan_subnets.items(): | 554 | for vlan, (subnets_v4, subnets_v6) in vlan_subnets.items(): |
1853 | 562 | # IPv4 | 555 | # IPv4 |
1854 | 563 | if len(subnets_v4) > 0: | 556 | if len(subnets_v4) > 0: |
1876 | 564 | try: | 557 | config = get_dhcp_configure_for( |
1877 | 565 | config = get_dhcp_configure_for( | 558 | 4, rack_controller, vlan, subnets_v4, ntp_servers, |
1878 | 566 | 4, rack_controller, vlan, subnets_v4, ntp_servers, | 559 | default_domain, dhcp_snippets) |
1879 | 567 | default_domain, dhcp_snippets) | 560 | failover_peer, subnets, hosts, interface = config |
1880 | 568 | except DHCPConfigurationError: | 561 | if failover_peer is not None: |
1881 | 569 | # XXX bug #1602412: this silently breaks DHCPv4, but we cannot | 562 | failover_peers_v4.append(failover_peer) |
1882 | 570 | # allow it to crash here since DHCPv6 might be able to run. | 563 | shared_networks_v4.append({ |
1883 | 571 | # This error may be irrelevant if there is an IPv4 network in | 564 | "name": "vlan-%d" % vlan.id, |
1884 | 572 | # the MAAS model which is not configured on the rack, and the | 565 | "subnets": subnets, |
1885 | 573 | # user only wants to serve DHCPv6. But it is still something | 566 | }) |
1886 | 574 | # worth noting, so log it and continue. | 567 | hosts_v4.extend(hosts) |
1887 | 575 | log.err(None, "Failure configuring DHCPv4.") | 568 | if interface is not None: |
1867 | 576 | else: | ||
1868 | 577 | failover_peer, subnets, hosts, interface = config | ||
1869 | 578 | if failover_peer is not None: | ||
1870 | 579 | failover_peers_v4.append(failover_peer) | ||
1871 | 580 | shared_networks_v4.append({ | ||
1872 | 581 | "name": "vlan-%d" % vlan.id, | ||
1873 | 582 | "subnets": subnets, | ||
1874 | 583 | }) | ||
1875 | 584 | hosts_v4.extend(hosts) | ||
1888 | 585 | interfaces_v4.add(interface) | 569 | interfaces_v4.add(interface) |
1889 | 586 | # IPv6 | 570 | # IPv6 |
1890 | 587 | if len(subnets_v6) > 0: | 571 | if len(subnets_v6) > 0: |
1912 | 588 | try: | 572 | config = get_dhcp_configure_for( |
1913 | 589 | config = get_dhcp_configure_for( | 573 | 6, rack_controller, vlan, subnets_v6, |
1914 | 590 | 6, rack_controller, vlan, subnets_v6, | 574 | ntp_servers, default_domain, dhcp_snippets) |
1915 | 591 | ntp_servers, default_domain, dhcp_snippets) | 575 | failover_peer, subnets, hosts, interface = config |
1916 | 592 | except DHCPConfigurationError: | 576 | if failover_peer is not None: |
1917 | 593 | # XXX bug #1602412: this silently breaks DHCPv6, but we cannot | 577 | failover_peers_v6.append(failover_peer) |
1918 | 594 | # allow it to crash here since DHCPv4 might be able to run. | 578 | shared_networks_v6.append({ |
1919 | 595 | # This error may be irrelevant if there is an IPv6 network in | 579 | "name": "vlan-%d" % vlan.id, |
1920 | 596 | # the MAAS model which is not configured on the rack, and the | 580 | "subnets": subnets, |
1921 | 597 | # user only wants to serve DHCPv4. But it is still something | 581 | }) |
1922 | 598 | # worth noting, so log it and continue. | 582 | hosts_v6.extend(hosts) |
1923 | 599 | log.err(None, "Failure configuring DHCPv6.") | 583 | if interface is not None: |
1903 | 600 | else: | ||
1904 | 601 | failover_peer, subnets, hosts, interface = config | ||
1905 | 602 | if failover_peer is not None: | ||
1906 | 603 | failover_peers_v6.append(failover_peer) | ||
1907 | 604 | shared_networks_v6.append({ | ||
1908 | 605 | "name": "vlan-%d" % vlan.id, | ||
1909 | 606 | "subnets": subnets, | ||
1910 | 607 | }) | ||
1911 | 608 | hosts_v6.extend(hosts) | ||
1924 | 609 | interfaces_v6.add(interface) | 584 | interfaces_v6.add(interface) |
1925 | 585 | # When no interfaces exist for each IP version clear the shared networks | ||
1926 | 586 | # as DHCP server cannot be started and needs to be stopped. | ||
1927 | 587 | if len(interfaces_v4) == 0: | ||
1928 | 588 | shared_networks_v4 = {} | ||
1929 | 589 | if len(interfaces_v6) == 0: | ||
1930 | 590 | shared_networks_v6 = {} | ||
1931 | 610 | return DHCPConfigurationForRack( | 591 | return DHCPConfigurationForRack( |
1932 | 611 | failover_peers_v4, shared_networks_v4, hosts_v4, interfaces_v4, | 592 | failover_peers_v4, shared_networks_v4, hosts_v4, interfaces_v4, |
1933 | 612 | failover_peers_v6, shared_networks_v6, hosts_v6, interfaces_v6, | 593 | failover_peers_v6, shared_networks_v6, hosts_v6, interfaces_v6, |
1934 | 613 | 594 | ||
1935 | === renamed directory 'src/maas' => 'src/maasserver/djangosettings' | |||
1936 | === modified file 'src/maasserver/djangosettings/demo.py' | |||
1937 | --- src/maas/demo.py 2016-06-07 19:59:49 +0000 | |||
1938 | +++ src/maasserver/djangosettings/demo.py 2016-12-07 15:50:52 +0000 | |||
1939 | @@ -5,7 +5,7 @@ | |||
1940 | 5 | 5 | ||
1941 | 6 | from os.path import abspath | 6 | from os.path import abspath |
1942 | 7 | 7 | ||
1944 | 8 | from maas import ( | 8 | from maasserver.djangosettings import ( |
1945 | 9 | development, | 9 | development, |
1946 | 10 | import_settings, | 10 | import_settings, |
1947 | 11 | settings, | 11 | settings, |
1948 | 12 | 12 | ||
1949 | === modified file 'src/maasserver/djangosettings/development.py' | |||
1950 | --- src/maas/development.py 2016-10-18 11:21:26 +0000 | |||
1951 | +++ src/maasserver/djangosettings/development.py 2016-12-07 15:50:52 +0000 | |||
1952 | @@ -7,7 +7,7 @@ | |||
1953 | 7 | from os.path import abspath | 7 | from os.path import abspath |
1954 | 8 | 8 | ||
1955 | 9 | from formencode.validators import StringBool | 9 | from formencode.validators import StringBool |
1957 | 10 | from maas import ( | 10 | from maasserver.djangosettings import ( |
1958 | 11 | fix_up_databases, | 11 | fix_up_databases, |
1959 | 12 | import_settings, | 12 | import_settings, |
1960 | 13 | settings, | 13 | settings, |
1961 | 14 | 14 | ||
1962 | === modified file 'src/maasserver/djangosettings/settings.py' | |||
1963 | --- src/maas/settings.py 2016-11-22 00:53:43 +0000 | |||
1964 | +++ src/maasserver/djangosettings/settings.py 2016-12-07 15:50:52 +0000 | |||
1965 | @@ -6,9 +6,9 @@ | |||
1966 | 6 | import os | 6 | import os |
1967 | 7 | 7 | ||
1968 | 8 | import django.template.base | 8 | import django.template.base |
1969 | 9 | from maas import fix_up_databases | ||
1970 | 10 | from maas.monkey import patch_get_script_prefix | ||
1971 | 11 | from maasserver.config import RegionConfiguration | 9 | from maasserver.config import RegionConfiguration |
1972 | 10 | from maasserver.djangosettings import fix_up_databases | ||
1973 | 11 | from maasserver.djangosettings.monkey import patch_get_script_prefix | ||
1974 | 12 | 12 | ||
1975 | 13 | 13 | ||
1976 | 14 | def _read_timezone(tzfilename='/etc/timezone'): | 14 | def _read_timezone(tzfilename='/etc/timezone'): |
1977 | @@ -265,7 +265,7 @@ | |||
1978 | 265 | 265 | ||
1979 | 266 | ) | 266 | ) |
1980 | 267 | 267 | ||
1982 | 268 | ROOT_URLCONF = 'maas.urls' | 268 | ROOT_URLCONF = 'maasserver.djangosettings.urls' |
1983 | 269 | 269 | ||
1984 | 270 | TEMPLATE_DIRS = ( | 270 | TEMPLATE_DIRS = ( |
1985 | 271 | # Put strings here, like "/home/html/django_templates" | 271 | # Put strings here, like "/home/html/django_templates" |
1986 | 272 | 272 | ||
1987 | === renamed file 'src/maas/tests/test_maas.py' => 'src/maasserver/djangosettings/tests/test_settings.py' | |||
1988 | --- src/maas/tests/test_maas.py 2016-06-21 10:29:11 +0000 | |||
1989 | +++ src/maasserver/djangosettings/tests/test_settings.py 2016-12-07 15:50:52 +0000 | |||
1990 | @@ -10,11 +10,11 @@ | |||
1991 | 10 | 10 | ||
1992 | 11 | from django.conf import settings | 11 | from django.conf import settings |
1993 | 12 | from django.db import connections | 12 | from django.db import connections |
1995 | 13 | from maas import ( | 13 | from maasserver.djangosettings import ( |
1996 | 14 | find_settings, | 14 | find_settings, |
1997 | 15 | import_settings, | 15 | import_settings, |
1998 | 16 | ) | 16 | ) |
2000 | 17 | from maas.settings import ( | 17 | from maasserver.djangosettings.settings import ( |
2001 | 18 | _get_local_timezone, | 18 | _get_local_timezone, |
2002 | 19 | _read_timezone, | 19 | _read_timezone, |
2003 | 20 | ) | 20 | ) |
2004 | 21 | 21 | ||
2005 | === modified file 'src/maasserver/enum.py' | |||
2006 | --- src/maasserver/enum.py 2016-09-08 17:26:54 +0000 | |||
2007 | +++ src/maasserver/enum.py 2016-12-07 15:50:52 +0000 | |||
2008 | @@ -154,6 +154,8 @@ | |||
2009 | 154 | RACK_CONTROLLER = 2 | 154 | RACK_CONTROLLER = 2 |
2010 | 155 | REGION_CONTROLLER = 3 | 155 | REGION_CONTROLLER = 3 |
2011 | 156 | REGION_AND_RACK_CONTROLLER = 4 | 156 | REGION_AND_RACK_CONTROLLER = 4 |
2012 | 157 | CHASSIS = 5 | ||
2013 | 158 | STORAGE = 6 | ||
2014 | 157 | 159 | ||
2015 | 158 | 160 | ||
2016 | 159 | # This is copied in static/js/angular/controllers/subnet_details.js. If you | 161 | # This is copied in static/js/angular/controllers/subnet_details.js. If you |
2017 | @@ -164,6 +166,8 @@ | |||
2018 | 164 | (NODE_TYPE.RACK_CONTROLLER, "Rack controller"), | 166 | (NODE_TYPE.RACK_CONTROLLER, "Rack controller"), |
2019 | 165 | (NODE_TYPE.REGION_CONTROLLER, "Region controller"), | 167 | (NODE_TYPE.REGION_CONTROLLER, "Region controller"), |
2020 | 166 | (NODE_TYPE.REGION_AND_RACK_CONTROLLER, "Region and rack controller"), | 168 | (NODE_TYPE.REGION_AND_RACK_CONTROLLER, "Region and rack controller"), |
2021 | 169 | (NODE_TYPE.CHASSIS, "Chassis"), | ||
2022 | 170 | (NODE_TYPE.STORAGE, "Storage"), | ||
2023 | 167 | ) | 171 | ) |
2024 | 168 | 172 | ||
2025 | 169 | 173 | ||
2026 | 170 | 174 | ||
2027 | === modified file 'src/maasserver/exceptions.py' | |||
2028 | --- src/maasserver/exceptions.py 2016-03-28 13:54:47 +0000 | |||
2029 | +++ src/maasserver/exceptions.py 2016-12-07 15:50:52 +0000 | |||
2030 | @@ -199,7 +199,3 @@ | |||
2031 | 199 | information. | 199 | information. |
2032 | 200 | """ | 200 | """ |
2033 | 201 | api_error = int(http.client.SERVICE_UNAVAILABLE) | 201 | api_error = int(http.client.SERVICE_UNAVAILABLE) |
2034 | 202 | |||
2035 | 203 | |||
2036 | 204 | class DHCPConfigurationError(MAASException): | ||
2037 | 205 | """Raised when the configuration of DHCP hits a problem.""" | ||
2038 | 206 | 202 | ||
2039 | === modified file 'src/maasserver/forms_commission.py' | |||
2040 | --- src/maasserver/forms_commission.py 2015-12-01 18:12:59 +0000 | |||
2041 | +++ src/maasserver/forms_commission.py 2016-12-07 15:50:52 +0000 | |||
2042 | @@ -9,7 +9,6 @@ | |||
2043 | 9 | 9 | ||
2044 | 10 | from django import forms | 10 | from django import forms |
2045 | 11 | from django.core.exceptions import ValidationError | 11 | from django.core.exceptions import ValidationError |
2046 | 12 | from maasserver.enum import POWER_STATE | ||
2047 | 13 | from maasserver.node_action import compile_node_actions | 12 | from maasserver.node_action import compile_node_actions |
2048 | 14 | 13 | ||
2049 | 15 | 14 | ||
2050 | @@ -36,10 +35,6 @@ | |||
2051 | 36 | raise ValidationError( | 35 | raise ValidationError( |
2052 | 37 | "Commission is not available because of the current state " | 36 | "Commission is not available because of the current state " |
2053 | 38 | "of the node.") | 37 | "of the node.") |
2054 | 39 | if self.instance.power_state == POWER_STATE.ON: | ||
2055 | 40 | raise ValidationError( | ||
2056 | 41 | "Commission is not available because of the node is currently " | ||
2057 | 42 | "powered on.") | ||
2058 | 43 | return cleaned_data | 38 | return cleaned_data |
2059 | 44 | 39 | ||
2060 | 45 | def save(self): | 40 | def save(self): |
2061 | 46 | 41 | ||
2062 | === modified file 'src/maasserver/forms_subnet.py' | |||
2063 | --- src/maasserver/forms_subnet.py 2016-09-23 01:32:02 +0000 | |||
2064 | +++ src/maasserver/forms_subnet.py 2016-12-07 15:50:52 +0000 | |||
2065 | @@ -43,6 +43,9 @@ | |||
2066 | 43 | allow_proxy = forms.BooleanField( | 43 | allow_proxy = forms.BooleanField( |
2067 | 44 | required=False) | 44 | required=False) |
2068 | 45 | 45 | ||
2069 | 46 | managed = forms.BooleanField( | ||
2070 | 47 | required=False) | ||
2071 | 48 | |||
2072 | 46 | class Meta: | 49 | class Meta: |
2073 | 47 | model = Subnet | 50 | model = Subnet |
2074 | 48 | fields = ( | 51 | fields = ( |
2075 | @@ -56,6 +59,7 @@ | |||
2076 | 56 | 'rdns_mode', | 59 | 'rdns_mode', |
2077 | 57 | 'active_discovery', | 60 | 'active_discovery', |
2078 | 58 | 'allow_proxy', | 61 | 'allow_proxy', |
2079 | 62 | 'managed', | ||
2080 | 59 | ) | 63 | ) |
2081 | 60 | 64 | ||
2082 | 61 | def __init__(self, *args, **kwargs): | 65 | def __init__(self, *args, **kwargs): |
2083 | @@ -64,9 +68,12 @@ | |||
2084 | 64 | 68 | ||
2085 | 65 | def clean(self): | 69 | def clean(self): |
2086 | 66 | cleaned_data = super(SubnetForm, self).clean() | 70 | cleaned_data = super(SubnetForm, self).clean() |
2088 | 67 | # The default value for allow_proxy is True. | 71 | # The default value for 'allow_proxy' is True. |
2089 | 68 | if 'allow_proxy' not in self.data: | 72 | if 'allow_proxy' not in self.data: |
2090 | 69 | cleaned_data['allow_proxy'] = True | 73 | cleaned_data['allow_proxy'] = True |
2091 | 74 | # The default value for 'managed' is True. | ||
2092 | 75 | if 'managed' not in self.data: | ||
2093 | 76 | cleaned_data['managed'] = True | ||
2094 | 70 | # The ArrayField form has a bug which leaves out the first entry. | 77 | # The ArrayField form has a bug which leaves out the first entry. |
2095 | 71 | if 'dns_servers' in self.data and self.data['dns_servers'] != '': | 78 | if 'dns_servers' in self.data and self.data['dns_servers'] != '': |
2096 | 72 | cleaned_data['dns_servers'] = self.data.getlist('dns_servers') | 79 | cleaned_data['dns_servers'] = self.data.getlist('dns_servers') |
2097 | 73 | 80 | ||
2098 | === modified file 'src/maasserver/forms_vlan.py' | |||
2099 | --- src/maasserver/forms_vlan.py 2016-04-27 20:38:06 +0000 | |||
2100 | +++ src/maasserver/forms_vlan.py 2016-12-07 15:50:52 +0000 | |||
2101 | @@ -31,6 +31,7 @@ | |||
2102 | 31 | 'dhcp_on', | 31 | 'dhcp_on', |
2103 | 32 | 'primary_rack', | 32 | 'primary_rack', |
2104 | 33 | 'secondary_rack', | 33 | 'secondary_rack', |
2105 | 34 | 'relay_vlan', | ||
2106 | 34 | ) | 35 | ) |
2107 | 35 | 36 | ||
2108 | 36 | def __init__(self, *args, **kwargs): | 37 | def __init__(self, *args, **kwargs): |
2109 | @@ -40,6 +41,7 @@ | |||
2110 | 40 | if instance is None and self.fabric is None: | 41 | if instance is None and self.fabric is None: |
2111 | 41 | raise ValueError("Form requires either a instance or a fabric.") | 42 | raise ValueError("Form requires either a instance or a fabric.") |
2112 | 42 | self._set_up_rack_fields() | 43 | self._set_up_rack_fields() |
2113 | 44 | self._set_up_relay_vlan() | ||
2114 | 43 | 45 | ||
2115 | 44 | def _set_up_rack_fields(self): | 46 | def _set_up_rack_fields(self): |
2116 | 45 | qs = RackController.objects.filter_by_vids([self.instance.vid]) | 47 | qs = RackController.objects.filter_by_vids([self.instance.vid]) |
2117 | @@ -61,6 +63,22 @@ | |||
2118 | 61 | secondary_rack = RackController.objects.get(id=secondary_rack_id) | 63 | secondary_rack = RackController.objects.get(id=secondary_rack_id) |
2119 | 62 | self.initial['secondary_rack'] = secondary_rack.system_id | 64 | self.initial['secondary_rack'] = secondary_rack.system_id |
2120 | 63 | 65 | ||
2121 | 66 | def _set_up_relay_vlan(self): | ||
2122 | 67 | # Configure the relay_vlan fields to include only VLAN's that are | ||
2123 | 68 | # not already on a relay_vlan. If this is an update then it cannot | ||
2124 | 69 | # be itself or never set when dhcp_on is True. | ||
2125 | 70 | possible_relay_vlans = VLAN.objects.filter(relay_vlan__isnull=True) | ||
2126 | 71 | if self.instance is not None: | ||
2127 | 72 | possible_relay_vlans = possible_relay_vlans.exclude( | ||
2128 | 73 | id=self.instance.id) | ||
2129 | 74 | if self.instance.dhcp_on: | ||
2130 | 75 | possible_relay_vlans = VLAN.objects.none() | ||
2131 | 76 | if self.instance.relay_vlan is not None: | ||
2132 | 77 | possible_relay_vlans = VLAN.objects.filter( | ||
2133 | 78 | id=self.instance.relay_vlan.id) | ||
2134 | 79 | self.fields['relay_vlan'] = forms.ModelChoiceField( | ||
2135 | 80 | queryset=possible_relay_vlans, required=False) | ||
2136 | 81 | |||
2137 | 64 | def clean(self): | 82 | def clean(self): |
2138 | 65 | cleaned_data = super(VLANForm, self).clean() | 83 | cleaned_data = super(VLANForm, self).clean() |
2139 | 66 | # Automatically promote the secondary rack controller to the primary | 84 | # Automatically promote the secondary rack controller to the primary |
2140 | @@ -120,5 +138,12 @@ | |||
2141 | 120 | interface = super(VLANForm, self).save(commit=False) | 138 | interface = super(VLANForm, self).save(commit=False) |
2142 | 121 | if self.fabric is not None: | 139 | if self.fabric is not None: |
2143 | 122 | interface.fabric = self.fabric | 140 | interface.fabric = self.fabric |
2144 | 141 | if ('relay_vlan' in self.data and | ||
2145 | 142 | not self.cleaned_data.get('relay_vlan')): | ||
2146 | 143 | # relay_vlan is being cleared. | ||
2147 | 144 | interface.relay_vlan = None | ||
2148 | 145 | if interface.dhcp_on: | ||
2149 | 146 | # relay_vlan cannot be set when dhcp is on. | ||
2150 | 147 | interface.relay_vlan = None | ||
2151 | 123 | interface.save() | 148 | interface.save() |
2152 | 124 | return interface | 149 | return interface |
2153 | 125 | 150 | ||
2154 | === modified file 'src/maasserver/locks.py' | |||
2155 | --- src/maasserver/locks.py 2016-09-28 14:12:23 +0000 | |||
2156 | +++ src/maasserver/locks.py 2016-12-07 15:50:52 +0000 | |||
2157 | @@ -4,6 +4,7 @@ | |||
2158 | 4 | """Region-wide locks.""" | 4 | """Region-wide locks.""" |
2159 | 5 | 5 | ||
2160 | 6 | __all__ = [ | 6 | __all__ = [ |
2161 | 7 | "address_allocation", | ||
2162 | 7 | "dns", | 8 | "dns", |
2163 | 8 | "eventloop", | 9 | "eventloop", |
2164 | 9 | "import_images", | 10 | "import_images", |
2165 | @@ -11,7 +12,6 @@ | |||
2166 | 11 | "rack_registration", | 12 | "rack_registration", |
2167 | 12 | "security", | 13 | "security", |
2168 | 13 | "startup", | 14 | "startup", |
2169 | 14 | "staticip_acquire", | ||
2170 | 15 | ] | 15 | ] |
2171 | 16 | 16 | ||
2172 | 17 | from maasserver.utils.dblocks import ( | 17 | from maasserver.utils.dblocks import ( |
2173 | @@ -38,8 +38,8 @@ | |||
2174 | 38 | # Lock to prevent concurrent acquisition of nodes. | 38 | # Lock to prevent concurrent acquisition of nodes. |
2175 | 39 | node_acquire = DatabaseXactLock(7) | 39 | node_acquire = DatabaseXactLock(7) |
2176 | 40 | 40 | ||
2179 | 41 | # Lock to prevent concurrent allocation of StaticIPAddress | 41 | # Lock to help with concurrent allocation of IP addresses. |
2180 | 42 | staticip_acquire = DatabaseXactLock(8) | 42 | address_allocation = DatabaseLock(8) |
2181 | 43 | 43 | ||
2182 | 44 | # Lock to prevent concurrent registration of rack controllers. This can be a | 44 | # Lock to prevent concurrent registration of rack controllers. This can be a |
2183 | 45 | # problem because registration involves populating fabrics, VLANs, and other | 45 | # problem because registration involves populating fabrics, VLANs, and other |
2184 | 46 | 46 | ||
2185 | === modified file 'src/maasserver/management/commands/dbupgrade.py' | |||
2186 | --- src/maasserver/management/commands/dbupgrade.py 2016-09-04 19:57:50 +0000 | |||
2187 | +++ src/maasserver/management/commands/dbupgrade.py 2016-12-07 15:50:52 +0000 | |||
2188 | @@ -9,6 +9,7 @@ | |||
2189 | 9 | __all__ = [] | 9 | __all__ = [] |
2190 | 10 | 10 | ||
2191 | 11 | from importlib import import_module | 11 | from importlib import import_module |
2192 | 12 | import json | ||
2193 | 12 | import optparse | 13 | import optparse |
2194 | 13 | import os | 14 | import os |
2195 | 14 | import shutil | 15 | import shutil |
2196 | @@ -34,7 +35,7 @@ | |||
2197 | 34 | # Script that performs the south migrations for MAAS under django 1.6 and | 35 | # Script that performs the south migrations for MAAS under django 1.6 and |
2198 | 35 | # python2.7. | 36 | # python2.7. |
2199 | 36 | MAAS_UPGRADE_SCRIPT = """\ | 37 | MAAS_UPGRADE_SCRIPT = """\ |
2201 | 37 | # Copyright 2015 Canonical Ltd. This software is licensed under the | 38 | # Copyright 2015-2016 Canonical Ltd. This software is licensed under the |
2202 | 38 | # GNU Affero General Public License version 3 (see the file LICENSE). | 39 | # GNU Affero General Public License version 3 (see the file LICENSE). |
2203 | 39 | 40 | ||
2204 | 40 | from __future__ import ( | 41 | from __future__ import ( |
2205 | @@ -46,12 +47,49 @@ | |||
2206 | 46 | str = None | 47 | str = None |
2207 | 47 | 48 | ||
2208 | 48 | __metaclass__ = type | 49 | __metaclass__ = type |
2211 | 49 | __all__ = [ | 50 | __all__ = [] |
2210 | 50 | ] | ||
2212 | 51 | 51 | ||
2213 | 52 | import os | 52 | import os |
2214 | 53 | import sys | 53 | import sys |
2215 | 54 | 54 | ||
2216 | 55 | import django.conf | ||
2217 | 56 | |||
2218 | 57 | |||
2219 | 58 | class LazySettings(django.conf.LazySettings): | ||
2220 | 59 | '''Prevent Django from mangling warnings settings. | ||
2221 | 60 | |||
2222 | 61 | At present, Django adds a single filter that surfaces all deprecation | ||
2223 | 62 | warnings, but MAAS handles them differently. Django doesn't appear to give | ||
2224 | 63 | a way to prevent it from doing its thing, so we must undo its changes. | ||
2225 | 64 | |||
2226 | 65 | Deprecation warnings in production environments are not desirable as they | ||
2227 | 66 | are a developer tool, and not something an end user can reasonably do | ||
2228 | 67 | something about. This brings control of warnings back into MAAS's control. | ||
2229 | 68 | ''' | ||
2230 | 69 | |||
2231 | 70 | def _configure_logging(self): | ||
2232 | 71 | # This is a copy of *half* of Django's `_configure_logging`, omitting | ||
2233 | 72 | # the problematic bits. | ||
2234 | 73 | if self.LOGGING_CONFIG: | ||
2235 | 74 | from django.utils.log import DEFAULT_LOGGING | ||
2236 | 75 | from django.utils.module_loading import import_by_path | ||
2237 | 76 | # First find the logging configuration function ... | ||
2238 | 77 | logging_config_func = import_by_path(self.LOGGING_CONFIG) | ||
2239 | 78 | logging_config_func(DEFAULT_LOGGING) | ||
2240 | 79 | # ... then invoke it with the logging settings | ||
2241 | 80 | if self.LOGGING: | ||
2242 | 81 | logging_config_func(self.LOGGING) | ||
2243 | 82 | |||
2244 | 83 | |||
2245 | 84 | # Install our `LazySettings` as the Django-global settings class. First, | ||
2246 | 85 | # ensure that Django hasn't yet loaded its settings. | ||
2247 | 86 | assert not django.conf.settings.configured | ||
2248 | 87 | # This is needed because Django's `LazySettings` overrides `__setattr__`. | ||
2249 | 88 | object.__setattr__(django.conf.settings, "__class__", LazySettings) | ||
2250 | 89 | |||
2251 | 90 | # Force Django configuration. | ||
2252 | 91 | os.environ["DJANGO_SETTINGS_MODULE"] = "maas19settings" | ||
2253 | 92 | |||
2254 | 55 | # Inject the sys.path from the parent process so that the python path is | 93 | # Inject the sys.path from the parent process so that the python path is |
2255 | 56 | # is similar, except that the directory that this script is running from is | 94 | # is similar, except that the directory that this script is running from is |
2256 | 57 | # already the first path in sys.path. | 95 | # already the first path in sys.path. |
2257 | @@ -132,6 +170,11 @@ | |||
2258 | 132 | tempdir = tempfile.mkdtemp(prefix='maas-upgrade-') | 170 | tempdir = tempfile.mkdtemp(prefix='maas-upgrade-') |
2259 | 133 | subprocess.check_call([ | 171 | subprocess.check_call([ |
2260 | 134 | "tar", "zxf", path_to_tarball, "-C", tempdir]) | 172 | "tar", "zxf", path_to_tarball, "-C", tempdir]) |
2261 | 173 | |||
2262 | 174 | settings_json = os.path.join(tempdir, "maas19settings.json") | ||
2263 | 175 | with open(settings_json, "w", encoding="utf-8") as fd: | ||
2264 | 176 | fd.write(json.dumps({"DATABASES": settings.DATABASES})) | ||
2265 | 177 | |||
2266 | 135 | script_path = os.path.join(tempdir, "migrate.py") | 178 | script_path = os.path.join(tempdir, "migrate.py") |
2267 | 136 | with open(script_path, "wb") as fp: | 179 | with open(script_path, "wb") as fp: |
2268 | 137 | fp.write(MAAS_UPGRADE_SCRIPT.encode("utf-8")) | 180 | fp.write(MAAS_UPGRADE_SCRIPT.encode("utf-8")) |
2269 | 138 | 181 | ||
2270 | === modified file 'src/maasserver/management/commands/tests/test_dbupgrade.py' | |||
2271 | --- src/maasserver/management/commands/tests/test_dbupgrade.py 2016-03-28 13:54:47 +0000 | |||
2272 | +++ src/maasserver/management/commands/tests/test_dbupgrade.py 2016-12-07 15:50:52 +0000 | |||
2273 | @@ -64,7 +64,10 @@ | |||
2274 | 64 | env = os.environ.copy() | 64 | env = os.environ.copy() |
2275 | 65 | env["MAAS_PREVENT_MIGRATIONS"] = "0" | 65 | env["MAAS_PREVENT_MIGRATIONS"] = "0" |
2276 | 66 | mra = os.path.join(root, "bin", "maas-region") | 66 | mra = os.path.join(root, "bin", "maas-region") |
2278 | 67 | cmd = [mra, "dbupgrade", "--settings", "maas.settings"] | 67 | cmd = [ |
2279 | 68 | mra, "dbupgrade", "--settings", | ||
2280 | 69 | "maasserver.djangosettings.settings", | ||
2281 | 70 | ] | ||
2282 | 68 | if always_south: | 71 | if always_south: |
2283 | 69 | cmd.append("--always-south") | 72 | cmd.append("--always-south") |
2284 | 70 | self.execute(cmd, env=env) | 73 | self.execute(cmd, env=env) |
2285 | 71 | 74 | ||
2286 | === modified file 'src/maasserver/migrations/builtin/maasserver/0016_migrate_power_data_node_to_bmc.py' | |||
2287 | --- src/maasserver/migrations/builtin/maasserver/0016_migrate_power_data_node_to_bmc.py 2016-05-11 19:01:48 +0000 | |||
2288 | +++ src/maasserver/migrations/builtin/maasserver/0016_migrate_power_data_node_to_bmc.py 2016-12-07 15:50:52 +0000 | |||
2289 | @@ -2,10 +2,8 @@ | |||
2290 | 2 | 2 | ||
2291 | 3 | from django.db import migrations | 3 | from django.db import migrations |
2292 | 4 | from maasserver.models import timestampedmodel | 4 | from maasserver.models import timestampedmodel |
2297 | 5 | from provisioningserver.power.schema import ( | 5 | from provisioningserver.drivers import SETTING_SCOPE |
2298 | 6 | POWER_FIELDS_BY_TYPE, | 6 | from provisioningserver.drivers.power import PowerDriverRegistry |
2295 | 7 | POWER_PARAMETER_SCOPE, | ||
2296 | 8 | ) | ||
2299 | 9 | 7 | ||
2300 | 10 | # Copied from BMC model. | 8 | # Copied from BMC model. |
2301 | 11 | def scope_power_parameters(power_type, power_params): | 9 | def scope_power_parameters(power_type, power_params): |
2302 | @@ -14,16 +12,20 @@ | |||
2303 | 14 | if not power_type: | 12 | if not power_type: |
2304 | 15 | # If there is no power type, treat all params as node params. | 13 | # If there is no power type, treat all params as node params. |
2305 | 16 | return ({}, power_params) | 14 | return ({}, power_params) |
2307 | 17 | power_fields = POWER_FIELDS_BY_TYPE.get(power_type) | 15 | power_driver = PowerDriverRegistry.get_item(power_type) |
2308 | 16 | if power_driver is None: | ||
2309 | 17 | # If there is no power driver, treat all params as node params. | ||
2310 | 18 | return ({}, power_params) | ||
2311 | 19 | power_fields = power_driver.settings | ||
2312 | 18 | if not power_fields: | 20 | if not power_fields: |
2313 | 19 | # If there is no parameter info, treat all params as node params. | 21 | # If there is no parameter info, treat all params as node params. |
2314 | 20 | return ({}, power_params) | 22 | return ({}, power_params) |
2315 | 21 | bmc_params = {} | 23 | bmc_params = {} |
2316 | 22 | node_params = {} | 24 | node_params = {} |
2317 | 23 | for param_name in power_params: | 25 | for param_name in power_params: |
2319 | 24 | power_field = power_fields.get(param_name) | 26 | power_field = power_driver.get_setting(param_name) |
2320 | 25 | if power_field and power_field.get( | 27 | if power_field and power_field.get( |
2322 | 26 | 'scope') == POWER_PARAMETER_SCOPE.BMC: | 28 | 'scope') == SETTING_SCOPE.BMC: |
2323 | 27 | bmc_params[param_name] = power_params[param_name] | 29 | bmc_params[param_name] = power_params[param_name] |
2324 | 28 | else: | 30 | else: |
2325 | 29 | node_params[param_name] = power_params[param_name] | 31 | node_params[param_name] = power_params[param_name] |
2326 | 30 | 32 | ||
2327 | === modified file 'src/maasserver/migrations/builtin/maasserver/0022_extract_ip_for_bmcs.py' | |||
2328 | --- src/maasserver/migrations/builtin/maasserver/0022_extract_ip_for_bmcs.py 2016-05-11 19:01:48 +0000 | |||
2329 | +++ src/maasserver/migrations/builtin/maasserver/0022_extract_ip_for_bmcs.py 2016-12-07 15:50:52 +0000 | |||
2330 | @@ -8,7 +8,7 @@ | |||
2331 | 8 | ) | 8 | ) |
2332 | 9 | from maasserver.enum import IPADDRESS_TYPE | 9 | from maasserver.enum import IPADDRESS_TYPE |
2333 | 10 | from maasserver.models import timestampedmodel | 10 | from maasserver.models import timestampedmodel |
2335 | 11 | from provisioningserver.power.schema import POWER_TYPE_PARAMETERS_BY_NAME | 11 | from provisioningserver.drivers.power import PowerDriverRegistry |
2336 | 12 | 12 | ||
2337 | 13 | # Derived from Subnet model. | 13 | # Derived from Subnet model. |
2338 | 14 | def raw_subnet_id_containing_ip(ip): | 14 | def raw_subnet_id_containing_ip(ip): |
2339 | @@ -38,10 +38,13 @@ | |||
2340 | 38 | # power_address field, returns None. | 38 | # power_address field, returns None. |
2341 | 39 | if not power_type or not power_parameters: | 39 | if not power_type or not power_parameters: |
2342 | 40 | return None | 40 | return None |
2344 | 41 | power_type_parameters = POWER_TYPE_PARAMETERS_BY_NAME.get(power_type) | 41 | power_driver = PowerDriverRegistry.get_item(power_type) |
2345 | 42 | if power_driver is None: | ||
2346 | 43 | return None | ||
2347 | 44 | power_type_parameters = power_driver.settings | ||
2348 | 42 | if not power_type_parameters: | 45 | if not power_type_parameters: |
2349 | 43 | return None | 46 | return None |
2351 | 44 | ip_extractor = power_type_parameters.get('ip_extractor') | 47 | ip_extractor = power_driver.ip_extractor |
2352 | 45 | if not ip_extractor: | 48 | if not ip_extractor: |
2353 | 46 | return None | 49 | return None |
2354 | 47 | field_value = power_parameters.get(ip_extractor.get('field_name')) | 50 | field_value = power_parameters.get(ip_extractor.get('field_name')) |
2355 | 48 | 51 | ||
2356 | === modified file 'src/maasserver/migrations/builtin/maasserver/0027_replace_static_range_with_admin_reserved_ranges.py' | |||
2357 | --- src/maasserver/migrations/builtin/maasserver/0027_replace_static_range_with_admin_reserved_ranges.py 2016-05-11 19:01:48 +0000 | |||
2358 | +++ src/maasserver/migrations/builtin/maasserver/0027_replace_static_range_with_admin_reserved_ranges.py 2016-12-07 15:50:52 +0000 | |||
2359 | @@ -37,7 +37,7 @@ | |||
2360 | 37 | IPRange, subnet, ranges, created_time, range_description): | 37 | IPRange, subnet, ranges, created_time, range_description): |
2361 | 38 | unreserved_range_set = MAASIPSet(ranges) | 38 | unreserved_range_set = MAASIPSet(ranges) |
2362 | 39 | unreserved_ranges = unreserved_range_set.get_unused_ranges( | 39 | unreserved_ranges = unreserved_range_set.get_unused_ranges( |
2364 | 40 | subnet.cidr, comment="reserved") | 40 | subnet.cidr, purpose="reserved") |
2365 | 41 | for iprange in unreserved_ranges: | 41 | for iprange in unreserved_ranges: |
2366 | 42 | start_ip = str(IPAddress(iprange.first)) | 42 | start_ip = str(IPAddress(iprange.first)) |
2367 | 43 | end_ip = str(IPAddress(iprange.last)) | 43 | end_ip = str(IPAddress(iprange.last)) |
2368 | 44 | 44 | ||
2369 | === modified file 'src/maasserver/migrations/builtin/maasserver/0056_add_description_to_fabric_and_space.py' | |||
2370 | --- src/maasserver/migrations/builtin/maasserver/0056_add_description_to_fabric_and_space.py 2016-07-30 01:17:54 +0000 | |||
2371 | +++ src/maasserver/migrations/builtin/maasserver/0056_add_description_to_fabric_and_space.py 2016-12-07 15:50:52 +0000 | |||
2372 | @@ -44,6 +44,6 @@ | |||
2373 | 44 | migrations.AlterField( | 44 | migrations.AlterField( |
2374 | 45 | model_name='subnet', | 45 | model_name='subnet', |
2375 | 46 | name='vlan', | 46 | name='vlan', |
2377 | 47 | field=models.ForeignKey(to='maasserver.VLAN', default=maasserver.models.subnet.get_default_vlan, on_delete=django.db.models.deletion.PROTECT), | 47 | field=models.ForeignKey(to='maasserver.VLAN', default=None, on_delete=django.db.models.deletion.PROTECT), |
2378 | 48 | ), | 48 | ), |
2379 | 49 | ] | 49 | ] |
2380 | 50 | 50 | ||
2381 | === added file 'src/maasserver/migrations/builtin/maasserver/0094_add_unmanaged_subnets.py' | |||
2382 | --- src/maasserver/migrations/builtin/maasserver/0094_add_unmanaged_subnets.py 1970-01-01 00:00:00 +0000 | |||
2383 | +++ src/maasserver/migrations/builtin/maasserver/0094_add_unmanaged_subnets.py 2016-12-07 15:50:52 +0000 | |||
2384 | @@ -0,0 +1,22 @@ | |||
2385 | 1 | # -*- coding: utf-8 -*- | ||
2386 | 2 | from __future__ import unicode_literals | ||
2387 | 3 | |||
2388 | 4 | from django.db import ( | ||
2389 | 5 | migrations, | ||
2390 | 6 | models, | ||
2391 | 7 | ) | ||
2392 | 8 | |||
2393 | 9 | |||
2394 | 10 | class Migration(migrations.Migration): | ||
2395 | 11 | |||
2396 | 12 | dependencies = [ | ||
2397 | 13 | ('maasserver', '0093_add_rdns_model'), | ||
2398 | 14 | ] | ||
2399 | 15 | |||
2400 | 16 | operations = [ | ||
2401 | 17 | migrations.AddField( | ||
2402 | 18 | model_name='subnet', | ||
2403 | 19 | name='managed', | ||
2404 | 20 | field=models.BooleanField(default=True), | ||
2405 | 21 | ), | ||
2406 | 22 | ] | ||
2407 | 0 | 23 | ||
2408 | === added file 'src/maasserver/migrations/builtin/maasserver/0095_vlan_relay_vlan.py' | |||
2409 | --- src/maasserver/migrations/builtin/maasserver/0095_vlan_relay_vlan.py 1970-01-01 00:00:00 +0000 | |||
2410 | +++ src/maasserver/migrations/builtin/maasserver/0095_vlan_relay_vlan.py 2016-12-07 15:50:52 +0000 | |||
2411 | @@ -0,0 +1,23 @@ | |||
2412 | 1 | # -*- coding: utf-8 -*- | ||
2413 | 2 | from __future__ import unicode_literals | ||
2414 | 3 | |||
2415 | 4 | from django.db import ( | ||
2416 | 5 | migrations, | ||
2417 | 6 | models, | ||
2418 | 7 | ) | ||
2419 | 8 | import django.db.models.deletion | ||
2420 | 9 | |||
2421 | 10 | |||
2422 | 11 | class Migration(migrations.Migration): | ||
2423 | 12 | |||
2424 | 13 | dependencies = [ | ||
2425 | 14 | ('maasserver', '0094_add_unmanaged_subnets'), | ||
2426 | 15 | ] | ||
2427 | 16 | |||
2428 | 17 | operations = [ | ||
2429 | 18 | migrations.AddField( | ||
2430 | 19 | model_name='vlan', | ||
2431 | 20 | name='relay_vlan', | ||
2432 | 21 | field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, blank=True, related_name='relay_vlans', to='maasserver.VLAN'), | ||
2433 | 22 | ), | ||
2434 | 23 | ] | ||
2435 | 0 | 24 | ||
2436 | === added file 'src/maasserver/migrations/builtin/maasserver/0096_set_default_vlan_field.py' | |||
2437 | --- src/maasserver/migrations/builtin/maasserver/0096_set_default_vlan_field.py 1970-01-01 00:00:00 +0000 | |||
2438 | +++ src/maasserver/migrations/builtin/maasserver/0096_set_default_vlan_field.py 2016-12-07 15:50:52 +0000 | |||
2439 | @@ -0,0 +1,24 @@ | |||
2440 | 1 | # -*- coding: utf-8 -*- | ||
2441 | 2 | from __future__ import unicode_literals | ||
2442 | 3 | |||
2443 | 4 | from django.db import ( | ||
2444 | 5 | migrations, | ||
2445 | 6 | models, | ||
2446 | 7 | ) | ||
2447 | 8 | import django.db.models.deletion | ||
2448 | 9 | import maasserver.models.subnet | ||
2449 | 10 | |||
2450 | 11 | |||
2451 | 12 | class Migration(migrations.Migration): | ||
2452 | 13 | |||
2453 | 14 | dependencies = [ | ||
2454 | 15 | ('maasserver', '0095_vlan_relay_vlan'), | ||
2455 | 16 | ] | ||
2456 | 17 | |||
2457 | 18 | operations = [ | ||
2458 | 19 | migrations.AlterField( | ||
2459 | 20 | model_name='subnet', | ||
2460 | 21 | name='vlan', | ||
2461 | 22 | field=models.ForeignKey(to='maasserver.VLAN', default=maasserver.models.subnet.get_default_vlan, on_delete=django.db.models.deletion.PROTECT), | ||
2462 | 23 | ), | ||
2463 | 24 | ] | ||
2464 | 0 | 25 | ||
2465 | === added file 'src/maasserver/migrations/builtin/maasserver/0097_node_chassis_storage_hints.py' | |||
2466 | --- src/maasserver/migrations/builtin/maasserver/0097_node_chassis_storage_hints.py 1970-01-01 00:00:00 +0000 | |||
2467 | +++ src/maasserver/migrations/builtin/maasserver/0097_node_chassis_storage_hints.py 2016-12-07 15:50:52 +0000 | |||
2468 | @@ -0,0 +1,73 @@ | |||
2469 | 1 | # -*- coding: utf-8 -*- | ||
2470 | 2 | from __future__ import unicode_literals | ||
2471 | 3 | |||
2472 | 4 | from django.db import ( | ||
2473 | 5 | migrations, | ||
2474 | 6 | models, | ||
2475 | 7 | ) | ||
2476 | 8 | import django.db.models.deletion | ||
2477 | 9 | import maasserver.models.cleansave | ||
2478 | 10 | import maasserver.models.node | ||
2479 | 11 | |||
2480 | 12 | |||
2481 | 13 | class Migration(migrations.Migration): | ||
2482 | 14 | |||
2483 | 15 | dependencies = [ | ||
2484 | 16 | ('maasserver', '0096_set_default_vlan_field'), | ||
2485 | 17 | ] | ||
2486 | 18 | |||
2487 | 19 | operations = [ | ||
2488 | 20 | migrations.CreateModel( | ||
2489 | 21 | name='ChassisHints', | ||
2490 | 22 | fields=[ | ||
2491 | 23 | ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), | ||
2492 | 24 | ('cores', models.IntegerField(default=0)), | ||
2493 | 25 | ('memory', models.IntegerField(default=0)), | ||
2494 | 26 | ('local_storage', models.IntegerField(default=0)), | ||
2495 | 27 | ], | ||
2496 | 28 | bases=(maasserver.models.cleansave.CleanSave, models.Model), | ||
2497 | 29 | ), | ||
2498 | 30 | migrations.CreateModel( | ||
2499 | 31 | name='Chassis', | ||
2500 | 32 | fields=[ | ||
2501 | 33 | ], | ||
2502 | 34 | options={ | ||
2503 | 35 | 'proxy': True, | ||
2504 | 36 | }, | ||
2505 | 37 | bases=('maasserver.node',), | ||
2506 | 38 | ), | ||
2507 | 39 | migrations.CreateModel( | ||
2508 | 40 | name='Storage', | ||
2509 | 41 | fields=[ | ||
2510 | 42 | ], | ||
2511 | 43 | options={ | ||
2512 | 44 | 'proxy': True, | ||
2513 | 45 | }, | ||
2514 | 46 | bases=('maasserver.node',), | ||
2515 | 47 | ), | ||
2516 | 48 | migrations.AddField( | ||
2517 | 49 | model_name='node', | ||
2518 | 50 | name='cpu_speed', | ||
2519 | 51 | field=models.IntegerField(default=0), | ||
2520 | 52 | ), | ||
2521 | 53 | migrations.AddField( | ||
2522 | 54 | model_name='node', | ||
2523 | 55 | name='dynamic', | ||
2524 | 56 | field=models.BooleanField(default=False), | ||
2525 | 57 | ), | ||
2526 | 58 | migrations.AlterField( | ||
2527 | 59 | model_name='node', | ||
2528 | 60 | name='domain', | ||
2529 | 61 | field=models.ForeignKey(to='maasserver.Domain', null=True, default=maasserver.models.node.get_default_domain, blank=True, on_delete=django.db.models.deletion.PROTECT), | ||
2530 | 62 | ), | ||
2531 | 63 | migrations.AlterField( | ||
2532 | 64 | model_name='node', | ||
2533 | 65 | name='node_type', | ||
2534 | 66 | field=models.IntegerField(choices=[(0, 'Machine'), (1, 'Device'), (2, 'Rack controller'), (3, 'Region controller'), (4, 'Region and rack controller'), (5, 'Chassis'), (6, 'Storage')], default=0, editable=False), | ||
2535 | 67 | ), | ||
2536 | 68 | migrations.AddField( | ||
2537 | 69 | model_name='chassishints', | ||
2538 | 70 | name='chassis', | ||
2539 | 71 | field=models.OneToOneField(to='maasserver.Node', related_name='chassis_hints'), | ||
2540 | 72 | ), | ||
2541 | 73 | ] | ||
2542 | 0 | 74 | ||
2543 | === renamed file 'src/maasserver/migrations/south/django16_south_maas19.tar.gz' => 'src/maasserver/migrations/south/django16_south_maas19.tar.gz.OTHER' | |||
2544 | 1 | Binary files src/maasserver/migrations/south/django16_south_maas19.tar.gz 2016-11-22 00:53:43 +0000 and src/maasserver/migrations/south/django16_south_maas19.tar.gz.OTHER 2016-12-07 15:50:52 +0000 differ | 75 | Binary files src/maasserver/migrations/south/django16_south_maas19.tar.gz 2016-11-22 00:53:43 +0000 and src/maasserver/migrations/south/django16_south_maas19.tar.gz.OTHER 2016-12-07 15:50:52 +0000 differ |
2545 | === modified file 'src/maasserver/models/__init__.py' | |||
2546 | --- src/maasserver/models/__init__.py 2016-10-18 22:22:23 +0000 | |||
2547 | +++ src/maasserver/models/__init__.py 2016-12-07 15:50:52 +0000 | |||
2548 | @@ -16,6 +16,8 @@ | |||
2549 | 16 | 'BootSourceSelection', | 16 | 'BootSourceSelection', |
2550 | 17 | 'BridgeInterface', | 17 | 'BridgeInterface', |
2551 | 18 | 'CacheSet', | 18 | 'CacheSet', |
2552 | 19 | 'Chassis', | ||
2553 | 20 | 'ChassisHints', | ||
2554 | 19 | 'ComponentError', | 21 | 'ComponentError', |
2555 | 20 | 'Config', | 22 | 'Config', |
2556 | 21 | 'Controller', | 23 | 'Controller', |
2557 | @@ -59,6 +61,7 @@ | |||
2558 | 59 | 'RegionRackRPCConnection', | 61 | 'RegionRackRPCConnection', |
2559 | 60 | 'Service', | 62 | 'Service', |
2560 | 61 | 'Space', | 63 | 'Space', |
2561 | 64 | 'Storage', | ||
2562 | 62 | 'SSHKey', | 65 | 'SSHKey', |
2563 | 63 | 'SSLKey', | 66 | 'SSLKey', |
2564 | 64 | 'StaticIPAddress', | 67 | 'StaticIPAddress', |
2565 | @@ -98,6 +101,7 @@ | |||
2566 | 98 | from maasserver.models.bootsourcecache import BootSourceCache | 101 | from maasserver.models.bootsourcecache import BootSourceCache |
2567 | 99 | from maasserver.models.bootsourceselection import BootSourceSelection | 102 | from maasserver.models.bootsourceselection import BootSourceSelection |
2568 | 100 | from maasserver.models.cacheset import CacheSet | 103 | from maasserver.models.cacheset import CacheSet |
2569 | 104 | from maasserver.models.chassishints import ChassisHints | ||
2570 | 101 | from maasserver.models.component_error import ComponentError | 105 | from maasserver.models.component_error import ComponentError |
2571 | 102 | from maasserver.models.config import Config | 106 | from maasserver.models.config import Config |
2572 | 103 | from maasserver.models.dhcpsnippet import DHCPSnippet | 107 | from maasserver.models.dhcpsnippet import DHCPSnippet |
2573 | @@ -133,6 +137,7 @@ | |||
2574 | 133 | from maasserver.models.mdns import MDNS | 137 | from maasserver.models.mdns import MDNS |
2575 | 134 | from maasserver.models.neighbour import Neighbour | 138 | from maasserver.models.neighbour import Neighbour |
2576 | 135 | from maasserver.models.node import ( | 139 | from maasserver.models.node import ( |
2577 | 140 | Chassis, | ||
2578 | 136 | Controller, | 141 | Controller, |
2579 | 137 | Device, | 142 | Device, |
2580 | 138 | Machine, | 143 | Machine, |
2581 | @@ -140,6 +145,7 @@ | |||
2582 | 140 | NodeGroupToRackController, | 145 | NodeGroupToRackController, |
2583 | 141 | RackController, | 146 | RackController, |
2584 | 142 | RegionController, | 147 | RegionController, |
2585 | 148 | Storage, | ||
2586 | 143 | ) | 149 | ) |
2587 | 144 | from maasserver.models.ownerdata import OwnerData | 150 | from maasserver.models.ownerdata import OwnerData |
2588 | 145 | from maasserver.models.packagerepository import PackageRepository | 151 | from maasserver.models.packagerepository import PackageRepository |
2589 | 146 | 152 | ||
2590 | === modified file 'src/maasserver/models/bmc.py' | |||
2591 | --- src/maasserver/models/bmc.py 2016-06-04 00:21:58 +0000 | |||
2592 | +++ src/maasserver/models/bmc.py 2016-12-07 15:50:52 +0000 | |||
2593 | @@ -25,12 +25,9 @@ | |||
2594 | 25 | from maasserver.models.subnet import Subnet | 25 | from maasserver.models.subnet import Subnet |
2595 | 26 | from maasserver.models.timestampedmodel import TimestampedModel | 26 | from maasserver.models.timestampedmodel import TimestampedModel |
2596 | 27 | from maasserver.rpc import getAllClients | 27 | from maasserver.rpc import getAllClients |
2597 | 28 | from provisioningserver.drivers import SETTING_SCOPE | ||
2598 | 29 | from provisioningserver.drivers.power import PowerDriverRegistry | ||
2599 | 28 | from provisioningserver.logger import get_maas_logger | 30 | from provisioningserver.logger import get_maas_logger |
2600 | 29 | from provisioningserver.power.schema import ( | ||
2601 | 30 | POWER_FIELDS_BY_TYPE, | ||
2602 | 31 | POWER_PARAMETER_SCOPE, | ||
2603 | 32 | POWER_TYPE_PARAMETERS_BY_NAME, | ||
2604 | 33 | ) | ||
2605 | 34 | 31 | ||
2606 | 35 | 32 | ||
2607 | 36 | maaslog = get_maas_logger("node") | 33 | maaslog = get_maas_logger("node") |
2608 | @@ -125,16 +122,20 @@ | |||
2609 | 125 | if not power_type: | 122 | if not power_type: |
2610 | 126 | # If there is no power type, treat all params as node params. | 123 | # If there is no power type, treat all params as node params. |
2611 | 127 | return ({}, power_params) | 124 | return ({}, power_params) |
2613 | 128 | power_fields = POWER_FIELDS_BY_TYPE.get(power_type) | 125 | power_driver = PowerDriverRegistry.get_item(power_type) |
2614 | 126 | if power_driver is None: | ||
2615 | 127 | # If there is no power driver, treat all params as node params. | ||
2616 | 128 | return ({}, power_params) | ||
2617 | 129 | power_fields = power_driver.settings | ||
2618 | 129 | if not power_fields: | 130 | if not power_fields: |
2619 | 130 | # If there is no parameter info, treat all params as node params. | 131 | # If there is no parameter info, treat all params as node params. |
2620 | 131 | return ({}, power_params) | 132 | return ({}, power_params) |
2621 | 132 | bmc_params = {} | 133 | bmc_params = {} |
2622 | 133 | node_params = {} | 134 | node_params = {} |
2623 | 134 | for param_name in power_params: | 135 | for param_name in power_params: |
2625 | 135 | power_field = power_fields.get(param_name) | 136 | power_field = power_driver.get_setting(param_name) |
2626 | 136 | if (power_field and | 137 | if (power_field and |
2628 | 137 | power_field.get('scope') == POWER_PARAMETER_SCOPE.BMC): | 138 | power_field.get('scope') == SETTING_SCOPE.BMC): |
2629 | 138 | bmc_params[param_name] = power_params[param_name] | 139 | bmc_params[param_name] = power_params[param_name] |
2630 | 139 | else: | 140 | else: |
2631 | 140 | node_params[param_name] = power_params[param_name] | 141 | node_params[param_name] = power_params[param_name] |
2632 | @@ -148,12 +149,17 @@ | |||
2633 | 148 | if not power_type or not power_parameters: | 149 | if not power_type or not power_parameters: |
2634 | 149 | # Nothing to extract. | 150 | # Nothing to extract. |
2635 | 150 | return None | 151 | return None |
2637 | 151 | power_type_parameters = POWER_TYPE_PARAMETERS_BY_NAME.get(power_type) | 152 | power_driver = PowerDriverRegistry.get_item(power_type) |
2638 | 153 | if power_driver is None: | ||
2639 | 154 | maaslog.warning( | ||
2640 | 155 | "No power driver for power type %s" % power_type) | ||
2641 | 156 | return None | ||
2642 | 157 | power_type_parameters = power_driver.settings | ||
2643 | 152 | if not power_type_parameters: | 158 | if not power_type_parameters: |
2644 | 153 | maaslog.warning( | 159 | maaslog.warning( |
2646 | 154 | "No POWER_TYPE_PARAMETERS for power type %s" % power_type) | 160 | "No power driver settings for power type %s" % power_type) |
2647 | 155 | return None | 161 | return None |
2649 | 156 | ip_extractor = power_type_parameters.get('ip_extractor') | 162 | ip_extractor = power_driver.ip_extractor |
2650 | 157 | if not ip_extractor: | 163 | if not ip_extractor: |
2651 | 158 | maaslog.info( | 164 | maaslog.info( |
2652 | 159 | "No IP extractor configured for power type %s. " | 165 | "No IP extractor configured for power type %s. " |
2653 | 160 | 166 | ||
2654 | === added file 'src/maasserver/models/chassishints.py' | |||
2655 | --- src/maasserver/models/chassishints.py 1970-01-01 00:00:00 +0000 | |||
2656 | +++ src/maasserver/models/chassishints.py 2016-12-07 15:50:52 +0000 | |||
2657 | @@ -0,0 +1,33 @@ | |||
2658 | 1 | # Copyright 2016 Canonical Ltd. This software is licensed under the | ||
2659 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
2660 | 3 | |||
2661 | 4 | """Model that holds hint information for a Chassis.""" | ||
2662 | 5 | |||
2663 | 6 | __all__ = [ | ||
2664 | 7 | 'ChassisHints', | ||
2665 | 8 | ] | ||
2666 | 9 | |||
2667 | 10 | |||
2668 | 11 | from django.db.models import ( | ||
2669 | 12 | IntegerField, | ||
2670 | 13 | Model, | ||
2671 | 14 | OneToOneField, | ||
2672 | 15 | ) | ||
2673 | 16 | from maasserver import DefaultMeta | ||
2674 | 17 | from maasserver.models.cleansave import CleanSave | ||
2675 | 18 | from maasserver.models.node import Node | ||
2676 | 19 | |||
2677 | 20 | |||
2678 | 21 | class ChassisHints(CleanSave, Model): | ||
2679 | 22 | """Hint information for a chassis.""" | ||
2680 | 23 | |||
2681 | 24 | class Meta(DefaultMeta): | ||
2682 | 25 | """Needed for South to recognize this model.""" | ||
2683 | 26 | |||
2684 | 27 | chassis = OneToOneField(Node, related_name="chassis_hints") | ||
2685 | 28 | |||
2686 | 29 | cores = IntegerField(default=0) | ||
2687 | 30 | |||
2688 | 31 | memory = IntegerField(default=0) | ||
2689 | 32 | |||
2690 | 33 | local_storage = IntegerField(default=0) | ||
2691 | 0 | 34 | ||
2692 | === modified file 'src/maasserver/models/event.py' | |||
2693 | --- src/maasserver/models/event.py 2016-10-25 13:57:02 +0000 | |||
2694 | +++ src/maasserver/models/event.py 2016-12-07 15:50:52 +0000 | |||
2695 | @@ -1,4 +1,4 @@ | |||
2697 | 1 | # Copyright 2014-2015 Canonical Ltd. This software is licensed under the | 1 | # Copyright 2014-2016 Canonical Ltd. This software is licensed under the |
2698 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). |
2699 | 3 | 3 | ||
2700 | 4 | """:class:`Event` and friends.""" | 4 | """:class:`Event` and friends.""" |
2701 | @@ -21,6 +21,7 @@ | |||
2702 | 21 | from maasserver.models.timestampedmodel import TimestampedModel | 21 | from maasserver.models.timestampedmodel import TimestampedModel |
2703 | 22 | from provisioningserver.events import EVENT_DETAILS | 22 | from provisioningserver.events import EVENT_DETAILS |
2704 | 23 | from provisioningserver.logger import get_maas_logger | 23 | from provisioningserver.logger import get_maas_logger |
2705 | 24 | from provisioningserver.utils.env import get_maas_id | ||
2706 | 24 | 25 | ||
2707 | 25 | 26 | ||
2708 | 26 | maaslog = get_maas_logger('models.event') | 27 | maaslog = get_maas_logger('models.event') |
2709 | @@ -56,6 +57,12 @@ | |||
2710 | 56 | event_action=event_action, | 57 | event_action=event_action, |
2711 | 57 | event_description=event_description) | 58 | event_description=event_description) |
2712 | 58 | 59 | ||
2713 | 60 | def create_region_event(self, event_type, event_description=''): | ||
2714 | 61 | """Helper to register event and event type for the running region.""" | ||
2715 | 62 | self.create_node_event( | ||
2716 | 63 | system_id=get_maas_id(), event_type=event_type, | ||
2717 | 64 | event_description=event_description) | ||
2718 | 65 | |||
2719 | 59 | 66 | ||
2720 | 60 | class Event(CleanSave, TimestampedModel): | 67 | class Event(CleanSave, TimestampedModel): |
2721 | 61 | """An `Event` represents a MAAS event. | 68 | """An `Event` represents a MAAS event. |
2722 | 62 | 69 | ||
2723 | === modified file 'src/maasserver/models/node.py' | |||
2724 | --- src/maasserver/models/node.py 2016-12-07 11:26:49 +0000 | |||
2725 | +++ src/maasserver/models/node.py 2016-12-07 15:50:52 +0000 | |||
2726 | @@ -158,12 +158,12 @@ | |||
2727 | 158 | ) | 158 | ) |
2728 | 159 | import petname | 159 | import petname |
2729 | 160 | from piston3.models import Token | 160 | from piston3.models import Token |
2730 | 161 | from provisioningserver.drivers.power import PowerDriverRegistry | ||
2731 | 161 | from provisioningserver.events import ( | 162 | from provisioningserver.events import ( |
2732 | 162 | EVENT_DETAILS, | 163 | EVENT_DETAILS, |
2733 | 163 | EVENT_TYPES, | 164 | EVENT_TYPES, |
2734 | 164 | ) | 165 | ) |
2735 | 165 | from provisioningserver.logger import get_maas_logger | 166 | from provisioningserver.logger import get_maas_logger |
2736 | 166 | from provisioningserver.power import QUERY_POWER_TYPES | ||
2737 | 167 | from provisioningserver.refresh import ( | 167 | from provisioningserver.refresh import ( |
2738 | 168 | get_sys_info, | 168 | get_sys_info, |
2739 | 169 | refresh, | 169 | refresh, |
2740 | @@ -265,32 +265,6 @@ | |||
2741 | 265 | "we could find no unused node identifiers." % attempt) | 265 | "we could find no unused node identifiers." % attempt) |
2742 | 266 | 266 | ||
2743 | 267 | 267 | ||
2744 | 268 | def typecast_node(node, model): | ||
2745 | 269 | """Typecast a node object into a node type object.""" | ||
2746 | 270 | assert(isinstance(node, Node)) | ||
2747 | 271 | assert(issubclass(model, Node)) | ||
2748 | 272 | node.__class__ = model | ||
2749 | 273 | return node | ||
2750 | 274 | |||
2751 | 275 | |||
2752 | 276 | def typecast_to_node_type(node): | ||
2753 | 277 | """Typecast a node object to what the node_type is set to.""" | ||
2754 | 278 | if node.node_type == NODE_TYPE.MACHINE: | ||
2755 | 279 | return typecast_node(node, Machine) | ||
2756 | 280 | elif node.node_type in ( | ||
2757 | 281 | NODE_TYPE.RACK_CONTROLLER, | ||
2758 | 282 | NODE_TYPE.REGION_AND_RACK_CONTROLLER): | ||
2759 | 283 | # XXX ltrager 18-02-2016 - Currently only rack controllers have | ||
2760 | 284 | # unique functionality so when combined return a rack controller | ||
2761 | 285 | return typecast_node(node, RackController) | ||
2762 | 286 | elif node.node_type == NODE_TYPE.REGION_CONTROLLER: | ||
2763 | 287 | return typecast_node(node, RegionController) | ||
2764 | 288 | elif node.node_type == NODE_TYPE.DEVICE: | ||
2765 | 289 | return typecast_node(node, Device) | ||
2766 | 290 | else: | ||
2767 | 291 | raise NotImplementedError("Unknown node type %d" % node.node_type) | ||
2768 | 292 | |||
2769 | 293 | |||
2770 | 294 | class NodeQueriesMixin(MAASQueriesMixin): | 268 | class NodeQueriesMixin(MAASQueriesMixin): |
2771 | 295 | 269 | ||
2772 | 296 | def filter_by_spaces(self, spaces): | 270 | def filter_by_spaces(self, spaces): |
2773 | @@ -520,7 +494,7 @@ | |||
2774 | 520 | node = get_object_or_404( | 494 | node = get_object_or_404( |
2775 | 521 | self.model, system_id=system_id, **kwargs) | 495 | self.model, system_id=system_id, **kwargs) |
2776 | 522 | if user.has_perm(perm, node): | 496 | if user.has_perm(perm, node): |
2778 | 523 | return typecast_to_node_type(node) | 497 | return node.as_self() |
2779 | 524 | else: | 498 | else: |
2780 | 525 | raise PermissionDenied() | 499 | raise PermissionDenied() |
2781 | 526 | 500 | ||
2782 | @@ -573,6 +547,19 @@ | |||
2783 | 573 | extra_filters = {'node_type': NODE_TYPE.DEVICE} | 547 | extra_filters = {'node_type': NODE_TYPE.DEVICE} |
2784 | 574 | 548 | ||
2785 | 575 | 549 | ||
2786 | 550 | class ChassisManager(BaseNodeManager): | ||
2787 | 551 | """Chassis are nodes that contain or can compose more machines or | ||
2788 | 552 | storage.""" | ||
2789 | 553 | |||
2790 | 554 | extra_filters = {'node_type': NODE_TYPE.CHASSIS} | ||
2791 | 555 | |||
2792 | 556 | |||
2793 | 557 | class StorageManager(BaseNodeManager): | ||
2794 | 558 | """Storage are nodes that provide storage to other machines.""" | ||
2795 | 559 | |||
2796 | 560 | extra_filters = {'node_type': NODE_TYPE.STORAGE} | ||
2797 | 561 | |||
2798 | 562 | |||
2799 | 576 | class ControllerManager(BaseNodeManager): | 563 | class ControllerManager(BaseNodeManager): |
2800 | 577 | """All controllers `RackController`, `RegionController`, and | 564 | """All controllers `RackController`, `RegionController`, and |
2801 | 578 | `RegionRackController`.""" | 565 | `RegionRackController`.""" |
2802 | @@ -746,7 +733,8 @@ | |||
2803 | 746 | update_fields.append("owner") | 733 | update_fields.append("owner") |
2804 | 747 | if len(update_fields) > 0: | 734 | if len(update_fields) > 0: |
2805 | 748 | node.save(update_fields=update_fields) | 735 | node.save(update_fields=update_fields) |
2807 | 749 | return typecast_node(node, self.model) | 736 | # Always cast to a region controller. |
2808 | 737 | return node.as_region_controller() | ||
2809 | 750 | 738 | ||
2810 | 751 | def _create_running_controller(self): | 739 | def _create_running_controller(self): |
2811 | 752 | """Create a region controller for the host machine. | 740 | """Create a region controller for the host machine. |
2812 | @@ -844,7 +832,7 @@ | |||
2813 | 844 | # What Domain do we use for this host unless the individual StaticIPAddress | 832 | # What Domain do we use for this host unless the individual StaticIPAddress |
2814 | 845 | # record overrides it? | 833 | # record overrides it? |
2815 | 846 | domain = ForeignKey( | 834 | domain = ForeignKey( |
2817 | 847 | Domain, default=get_default_domain, null=False, | 835 | Domain, default=get_default_domain, null=True, blank=True, |
2818 | 848 | editable=True, on_delete=PROTECT) | 836 | editable=True, on_delete=PROTECT) |
2819 | 849 | 837 | ||
2820 | 850 | # TTL for this Node's IP addresses. Since this must be the same for all | 838 | # TTL for this Node's IP addresses. Since this must be the same for all |
2821 | @@ -904,6 +892,7 @@ | |||
2822 | 904 | # Juju expects the following standard constraints, which are stored here | 892 | # Juju expects the following standard constraints, which are stored here |
2823 | 905 | # as a basic optimisation over querying the lshw output. | 893 | # as a basic optimisation over querying the lshw output. |
2824 | 906 | cpu_count = IntegerField(default=0) | 894 | cpu_count = IntegerField(default=0) |
2825 | 895 | cpu_speed = IntegerField(default=0) # MHz | ||
2826 | 907 | memory = IntegerField(default=0) | 896 | memory = IntegerField(default=0) |
2827 | 908 | 897 | ||
2828 | 909 | swap_size = BigIntegerField(null=True, blank=True, default=None) | 898 | swap_size = BigIntegerField(null=True, blank=True, default=None) |
2829 | @@ -945,6 +934,11 @@ | |||
2830 | 945 | 934 | ||
2831 | 946 | license_key = CharField(max_length=30, null=True, blank=True) | 935 | license_key = CharField(max_length=30, null=True, blank=True) |
2832 | 947 | 936 | ||
2833 | 937 | # Only used by Machine. Set to True when the machine was composed | ||
2834 | 938 | # dynamically from a Chassis during allocation. When the machine is | ||
2835 | 939 | # released it will be deleted. | ||
2836 | 940 | dynamic = BooleanField(default=False) | ||
2837 | 941 | |||
2838 | 948 | tags = ManyToManyField(Tag) | 942 | tags = ManyToManyField(Tag) |
2839 | 949 | 943 | ||
2840 | 950 | # Record the Interface the node last booted from. | 944 | # Record the Interface the node last booted from. |
2841 | @@ -1120,7 +1114,10 @@ | |||
2842 | 1120 | 1114 | ||
2843 | 1121 | Return the FQDN for this host. | 1115 | Return the FQDN for this host. |
2844 | 1122 | """ | 1116 | """ |
2846 | 1123 | return '%s.%s' % (self.hostname, self.domain.name) | 1117 | if self.domain is not None: |
2847 | 1118 | return '%s.%s' % (self.hostname, self.domain.name) | ||
2848 | 1119 | else: | ||
2849 | 1120 | return self.hostname | ||
2850 | 1124 | 1121 | ||
2851 | 1125 | def get_deployment_time(self): | 1122 | def get_deployment_time(self): |
2852 | 1126 | """Return the deployment time of this node (in seconds). | 1123 | """Return the deployment time of this node (in seconds). |
2853 | @@ -1704,7 +1701,9 @@ | |||
2854 | 1704 | # Node.start() has synchronous and asynchronous parts, so catch | 1701 | # Node.start() has synchronous and asynchronous parts, so catch |
2855 | 1705 | # exceptions arising synchronously, and chain callbacks to the | 1702 | # exceptions arising synchronously, and chain callbacks to the |
2856 | 1706 | # Deferred it returns for the asynchronous (post-commit) bits. | 1703 | # Deferred it returns for the asynchronous (post-commit) bits. |
2858 | 1707 | starting = self._start(user, commissioning_user_data, old_status) | 1704 | starting = self._start( |
2859 | 1705 | user, commissioning_user_data, old_status, | ||
2860 | 1706 | allow_power_cycle=True) | ||
2861 | 1708 | except Exception as error: | 1707 | except Exception as error: |
2862 | 1709 | self.status = old_status | 1708 | self.status = old_status |
2863 | 1710 | self.save() | 1709 | self.save() |
2864 | @@ -2098,7 +2097,11 @@ | |||
2865 | 2098 | else: | 2097 | else: |
2866 | 2099 | can_be_started = True | 2098 | can_be_started = True |
2867 | 2100 | can_be_stopped = True | 2099 | can_be_stopped = True |
2869 | 2101 | can_be_queried = power_type in QUERY_POWER_TYPES | 2100 | power_driver = PowerDriverRegistry.get_item(power_type) |
2870 | 2101 | if power_driver is not None: | ||
2871 | 2102 | can_be_queried = power_driver.queryable | ||
2872 | 2103 | else: | ||
2873 | 2104 | can_be_queried = False | ||
2874 | 2102 | return PowerInfo( | 2105 | return PowerInfo( |
2875 | 2103 | can_be_started, can_be_stopped, can_be_queried, | 2106 | can_be_started, can_be_stopped, can_be_queried, |
2876 | 2104 | power_type, power_params, | 2107 | power_type, power_params, |
2877 | @@ -2193,7 +2196,8 @@ | |||
2878 | 2193 | # Node.start() has synchronous and asynchronous parts, so catch | 2196 | # Node.start() has synchronous and asynchronous parts, so catch |
2879 | 2194 | # exceptions arising synchronously, and chain callbacks to the | 2197 | # exceptions arising synchronously, and chain callbacks to the |
2880 | 2195 | # Deferred it returns for the asynchronous (post-commit) bits. | 2198 | # Deferred it returns for the asynchronous (post-commit) bits. |
2882 | 2196 | starting = self._start(user, disk_erase_user_data, old_status) | 2199 | starting = self._start( |
2883 | 2200 | user, disk_erase_user_data, old_status, allow_power_cycle=True) | ||
2884 | 2197 | except Exception as error: | 2201 | except Exception as error: |
2885 | 2198 | # We always mark the node as failed here, although we could | 2202 | # We always mark the node as failed here, although we could |
2886 | 2199 | # potentially move it back to the state it was in previously. For | 2203 | # potentially move it back to the state it was in previously. For |
2887 | @@ -2378,7 +2382,7 @@ | |||
2888 | 2378 | if self.power_state == POWER_STATE.OFF: | 2382 | if self.power_state == POWER_STATE.OFF: |
2889 | 2379 | # The node is already powered off; we can deallocate all attached | 2383 | # The node is already powered off; we can deallocate all attached |
2890 | 2380 | # resources and mark the node READY without delay. | 2384 | # resources and mark the node READY without delay. |
2892 | 2381 | release_to_ready = True | 2385 | finalize_release = True |
2893 | 2382 | elif self.get_effective_power_info().can_be_queried: | 2386 | elif self.get_effective_power_info().can_be_queried: |
2894 | 2383 | # Controlled power type (one for which we can query the power | 2387 | # Controlled power type (one for which we can query the power |
2895 | 2384 | # state): update_power_state() will take care of making the node | 2388 | # state): update_power_state() will take care of making the node |
2896 | @@ -2387,13 +2391,13 @@ | |||
2897 | 2387 | post_commit().addCallback( | 2391 | post_commit().addCallback( |
2898 | 2388 | callOutToDatabase, Node._set_status_expires, | 2392 | callOutToDatabase, Node._set_status_expires, |
2899 | 2389 | self.system_id, self.get_releasing_time()) | 2393 | self.system_id, self.get_releasing_time()) |
2901 | 2390 | release_to_ready = False | 2394 | finalize_release = False |
2902 | 2391 | else: | 2395 | else: |
2903 | 2392 | # The node's power cannot be reliably controlled. Frankly, this | 2396 | # The node's power cannot be reliably controlled. Frankly, this |
2904 | 2393 | # node is not suitable for use with MAAS. Deallocate all attached | 2397 | # node is not suitable for use with MAAS. Deallocate all attached |
2905 | 2394 | # resources and mark the node READY without delay because there's | 2398 | # resources and mark the node READY without delay because there's |
2906 | 2395 | # not much else we can do. | 2399 | # not much else we can do. |
2908 | 2396 | release_to_ready = True | 2400 | finalize_release = True |
2909 | 2397 | 2401 | ||
2910 | 2398 | self.status = NODE_STATUS.RELEASING | 2402 | self.status = NODE_STATUS.RELEASING |
2911 | 2399 | self.token = None | 2403 | self.token = None |
2912 | @@ -2418,25 +2422,29 @@ | |||
2913 | 2418 | self.children.all().delete() | 2422 | self.children.all().delete() |
2914 | 2419 | 2423 | ||
2915 | 2420 | # Power was off or cannot be powered off so release to ready now. | 2424 | # Power was off or cannot be powered off so release to ready now. |
2918 | 2421 | if release_to_ready: | 2425 | if finalize_release: |
2919 | 2422 | self._release_to_ready() | 2426 | self._finalize_release() |
2920 | 2423 | 2427 | ||
2921 | 2424 | @transactional | 2428 | @transactional |
2924 | 2425 | def _release_to_ready(self): | 2429 | def _finalize_release(self): |
2925 | 2426 | """Release all remaining resources and mark the node `READY`. | 2430 | """Release all remaining resources, mark the machine `READY` if not |
2926 | 2431 | dynamic, otherwise delete the machine. | ||
2927 | 2427 | 2432 | ||
2928 | 2428 | Releasing a node can be straightforward or it can be a multi-step | 2433 | Releasing a node can be straightforward or it can be a multi-step |
2929 | 2429 | operation, which can include a reboot in order to erase disks, then a | 2434 | operation, which can include a reboot in order to erase disks, then a |
2930 | 2430 | final power-down. This method should be the absolute last method | 2435 | final power-down. This method should be the absolute last method |
2931 | 2431 | called. | 2436 | called. |
2932 | 2432 | """ | 2437 | """ |
2937 | 2433 | self.release_interface_config() | 2438 | if self.dynamic: |
2938 | 2434 | self.status = NODE_STATUS.READY | 2439 | self.delete() |
2939 | 2435 | self.owner = None | 2440 | else: |
2940 | 2436 | self.save() | 2441 | self.release_interface_config() |
2941 | 2442 | self.status = NODE_STATUS.READY | ||
2942 | 2443 | self.owner = None | ||
2943 | 2444 | self.save() | ||
2944 | 2437 | 2445 | ||
2947 | 2438 | # Remove all set owner data. | 2446 | # Remove all set owner data. |
2948 | 2439 | OwnerData.objects.filter(node=self).delete() | 2447 | OwnerData.objects.filter(node=self).delete() |
2949 | 2440 | 2448 | ||
2950 | 2441 | def release_or_erase( | 2449 | def release_or_erase( |
2951 | 2442 | self, user, comment=None, | 2450 | self, user, comment=None, |
2952 | @@ -2551,7 +2559,7 @@ | |||
2953 | 2551 | if mark_ready: | 2559 | if mark_ready: |
2954 | 2552 | # Ensure the node is released when it powers down. | 2560 | # Ensure the node is released when it powers down. |
2955 | 2553 | self.status_expires = None | 2561 | self.status_expires = None |
2957 | 2554 | self._release_to_ready() | 2562 | self._finalize_release() |
2958 | 2555 | if self.status == NODE_STATUS.EXITING_RESCUE_MODE: | 2563 | if self.status == NODE_STATUS.EXITING_RESCUE_MODE: |
2959 | 2556 | if self.previous_status == NODE_STATUS.BROKEN: | 2564 | if self.previous_status == NODE_STATUS.BROKEN: |
2960 | 2557 | if power_state == POWER_STATE.OFF: | 2565 | if power_state == POWER_STATE.OFF: |
2961 | @@ -3074,14 +3082,18 @@ | |||
2962 | 3074 | # You can't start a node you don't own unless you're an admin. | 3082 | # You can't start a node you don't own unless you're an admin. |
2963 | 3075 | raise PermissionDenied() | 3083 | raise PermissionDenied() |
2964 | 3076 | event = EVENT_TYPES.REQUEST_NODE_START | 3084 | event = EVENT_TYPES.REQUEST_NODE_START |
2965 | 3085 | allow_power_cycle = False | ||
2966 | 3077 | # If status is ALLOCATED, this start is actually for a deployment. | 3086 | # If status is ALLOCATED, this start is actually for a deployment. |
2967 | 3078 | # (Note: this is true even when nodes are being deployed from READY | 3087 | # (Note: this is true even when nodes are being deployed from READY |
2968 | 3079 | # state. See node_action.py; the node is acquired and then started.) | 3088 | # state. See node_action.py; the node is acquired and then started.) |
2969 | 3089 | # Power cycling is allowed when deployment is being started. | ||
2970 | 3080 | if self.status == NODE_STATUS.ALLOCATED: | 3090 | if self.status == NODE_STATUS.ALLOCATED: |
2971 | 3081 | event = EVENT_TYPES.REQUEST_NODE_START_DEPLOYMENT | 3091 | event = EVENT_TYPES.REQUEST_NODE_START_DEPLOYMENT |
2972 | 3092 | allow_power_cycle = True | ||
2973 | 3082 | self._register_request_event( | 3093 | self._register_request_event( |
2974 | 3083 | user, event, action='start', comment=comment) | 3094 | user, event, action='start', comment=comment) |
2976 | 3084 | return self._start(user, user_data) | 3095 | return self._start( |
2977 | 3096 | user, user_data, allow_power_cycle=allow_power_cycle) | ||
2978 | 3085 | 3097 | ||
2979 | 3086 | def _get_bmc_client_connection_info(self, *args, **kwargs): | 3098 | def _get_bmc_client_connection_info(self, *args, **kwargs): |
2980 | 3087 | """Return a tuple that list the rack controllers that can communicate | 3099 | """Return a tuple that list the rack controllers that can communicate |
2981 | @@ -3114,7 +3126,9 @@ | |||
2982 | 3114 | return client_idents, fallback_idents | 3126 | return client_idents, fallback_idents |
2983 | 3115 | 3127 | ||
2984 | 3116 | @transactional | 3128 | @transactional |
2986 | 3117 | def _start(self, user, user_data=None, old_status=None): | 3129 | def _start( |
2987 | 3130 | self, user, user_data=None, old_status=None, | ||
2988 | 3131 | allow_power_cycle=False): | ||
2989 | 3118 | """Request on given user's behalf that the node be started up. | 3132 | """Request on given user's behalf that the node be started up. |
2990 | 3119 | 3133 | ||
2991 | 3120 | :param user: Requesting user. | 3134 | :param user: Requesting user. |
2992 | @@ -3171,7 +3185,10 @@ | |||
2993 | 3171 | 3185 | ||
2994 | 3172 | # Request that the node be powered on post-commit. | 3186 | # Request that the node be powered on post-commit. |
2995 | 3173 | d = post_commit() | 3187 | d = post_commit() |
2997 | 3174 | d = self._power_control_node(d, power_on_node, power_info) | 3188 | if self.power_state == POWER_STATE.ON and allow_power_cycle: |
2998 | 3189 | d = self._power_control_node(d, power_cycle, power_info) | ||
2999 | 3190 | else: | ||
3000 | 3191 | d = self._power_control_node(d, power_on_node, power_info) | ||
3001 | 3175 | 3192 | ||
3002 | 3176 | # Set the deployment timeout so the node is marked failed after | 3193 | # Set the deployment timeout so the node is marked failed after |
3003 | 3177 | # a period of time. | 3194 | # a period of time. |
3004 | @@ -3532,6 +3549,61 @@ | |||
3005 | 3532 | # deal with it. | 3549 | # deal with it. |
3006 | 3533 | raise | 3550 | raise |
3007 | 3534 | 3551 | ||
3008 | 3552 | def _as(self, model): | ||
3009 | 3553 | """Create a `model` that shares underlying storage with `self`. | ||
3010 | 3554 | |||
3011 | 3555 | In other words, the newly returned object will be an instance of | ||
3012 | 3556 | `model` and its `__dict__` will be `self.__dict__`. Not a copy, but a | ||
3013 | 3557 | reference to, so that changes to one will be reflected in the other. | ||
3014 | 3558 | """ | ||
3015 | 3559 | new = object.__new__(model) | ||
3016 | 3560 | new.__dict__ = self.__dict__ | ||
3017 | 3561 | return new | ||
3018 | 3562 | |||
3019 | 3563 | def as_node(self): | ||
3020 | 3564 | """Return a reference to self that behaves as a `Node`.""" | ||
3021 | 3565 | return self._as(Node) | ||
3022 | 3566 | |||
3023 | 3567 | def as_machine(self): | ||
3024 | 3568 | """Return a reference to self that behaves as a `Machine`.""" | ||
3025 | 3569 | return self._as(Machine) | ||
3026 | 3570 | |||
3027 | 3571 | def as_device(self): | ||
3028 | 3572 | """Return a reference to self that behaves as a `Device`.""" | ||
3029 | 3573 | return self._as(Device) | ||
3030 | 3574 | |||
3031 | 3575 | def as_region_controller(self): | ||
3032 | 3576 | """Return a reference to self that behaves as a `RegionController`.""" | ||
3033 | 3577 | return self._as(RegionController) | ||
3034 | 3578 | |||
3035 | 3579 | def as_rack_controller(self): | ||
3036 | 3580 | """Return a reference to self that behaves as a `RackController`.""" | ||
3037 | 3581 | return self._as(RackController) | ||
3038 | 3582 | |||
3039 | 3583 | def as_chassis(self): | ||
3040 | 3584 | """Return a reference to self that behaves as a `Chassis`.""" | ||
3041 | 3585 | return self._as(Chassis) | ||
3042 | 3586 | |||
3043 | 3587 | def as_storage(self): | ||
3044 | 3588 | """Return a reference to self that behaves as a `Storage`.""" | ||
3045 | 3589 | return self._as(Storage) | ||
3046 | 3590 | |||
3047 | 3591 | _as_self = { | ||
3048 | 3592 | NODE_TYPE.DEVICE: as_device, | ||
3049 | 3593 | NODE_TYPE.MACHINE: as_machine, | ||
3050 | 3594 | NODE_TYPE.RACK_CONTROLLER: as_rack_controller, | ||
3051 | 3595 | # XXX ltrager 18-02-2016 - Currently only rack controllers have | ||
3052 | 3596 | # unique functionality so when combined return a rack controller | ||
3053 | 3597 | NODE_TYPE.REGION_AND_RACK_CONTROLLER: as_rack_controller, | ||
3054 | 3598 | NODE_TYPE.REGION_CONTROLLER: as_region_controller, | ||
3055 | 3599 | NODE_TYPE.CHASSIS: as_chassis, | ||
3056 | 3600 | NODE_TYPE.STORAGE: as_storage, | ||
3057 | 3601 | } | ||
3058 | 3602 | |||
3059 | 3603 | def as_self(self): | ||
3060 | 3604 | """Return a reference to self that behaves as its own type.""" | ||
3061 | 3605 | return self._as_self[self.node_type](self) | ||
3062 | 3606 | |||
3063 | 3535 | 3607 | ||
3064 | 3536 | # Piston serializes objects based on the object class. | 3608 | # Piston serializes objects based on the object class. |
3065 | 3537 | # Here we define a proxy class so that we can specialize how devices are | 3609 | # Here we define a proxy class so that we can specialize how devices are |
3066 | @@ -4224,7 +4296,7 @@ | |||
3067 | 4224 | # If the refresh is occuring on the running region execute it using | 4296 | # If the refresh is occuring on the running region execute it using |
3068 | 4225 | # the region process. This avoids using RPC and sends the node | 4297 | # the region process. This avoids using RPC and sends the node |
3069 | 4226 | # results back to this host when in HA. | 4298 | # results back to this host when in HA. |
3071 | 4227 | yield typecast_node(self, RegionController).refresh() | 4299 | yield self.as_region_controller().refresh() |
3072 | 4228 | return | 4300 | return |
3073 | 4229 | 4301 | ||
3074 | 4230 | client = yield getClientFor(self.system_id, timeout=1) | 4302 | client = yield getClientFor(self.system_id, timeout=1) |
3075 | @@ -4457,10 +4529,9 @@ | |||
3076 | 4457 | % self.hostname) | 4529 | % self.hostname) |
3077 | 4458 | 4530 | ||
3078 | 4459 | if self.node_type == NODE_TYPE.REGION_AND_RACK_CONTROLLER: | 4531 | if self.node_type == NODE_TYPE.REGION_AND_RACK_CONTROLLER: |
3083 | 4460 | # typecast_to_node_type returns a RackController object when the | 4532 | # Node.as_self() returns a RackController object when the node is |
3084 | 4461 | # node is a REGION_AND_RACK_CONTROLLER. Thus the API and websocket | 4533 | # a REGION_AND_RACK_CONTROLLER. Thus the API and websocket will |
3085 | 4462 | # will transition a REGION_AND_RACK_CONTROLLER to a | 4534 | # transition a REGION_AND_RACK_CONTROLLER to a REGION_CONTROLLER. |
3082 | 4463 | # REGION_CONTROLLER. | ||
3086 | 4464 | self.node_type = NODE_TYPE.RACK_CONTROLLER | 4535 | self.node_type = NODE_TYPE.RACK_CONTROLLER |
3087 | 4465 | self.save() | 4536 | self.save() |
3088 | 4466 | elif self._was_probably_machine(): | 4537 | elif self._was_probably_machine(): |
3089 | @@ -4519,6 +4590,56 @@ | |||
3090 | 4519 | pass | 4590 | pass |
3091 | 4520 | 4591 | ||
3092 | 4521 | 4592 | ||
3093 | 4593 | class Chassis(Node): | ||
3094 | 4594 | """A node that contains multiple machines and can compose new machines.""" | ||
3095 | 4595 | |||
3096 | 4596 | objects = ChassisManager() | ||
3097 | 4597 | |||
3098 | 4598 | class Meta(DefaultMeta): | ||
3099 | 4599 | proxy = True | ||
3100 | 4600 | |||
3101 | 4601 | def __init__(self, *args, **kwargs): | ||
3102 | 4602 | super(Chassis, self).__init__( | ||
3103 | 4603 | node_type=NODE_TYPE.CHASSIS, *args, **kwargs) | ||
3104 | 4604 | |||
3105 | 4605 | def clean_architecture(self, prev): | ||
3106 | 4606 | # Chassis aren't required to have a defined architecture | ||
3107 | 4607 | pass | ||
3108 | 4608 | |||
3109 | 4609 | def clean_hostname_domain(self, prev): | ||
3110 | 4610 | # Chassis is never in a domain. | ||
3111 | 4611 | if self.hostname.find('.') > -1: | ||
3112 | 4612 | # They have specified an FQDN. Split up the pieces, and throw | ||
3113 | 4613 | # away the rest. | ||
3114 | 4614 | self.hostname, _ = self.hostname.split('.', 1) | ||
3115 | 4615 | self.domain = None | ||
3116 | 4616 | |||
3117 | 4617 | |||
3118 | 4618 | class Storage(Node): | ||
3119 | 4619 | """A node that provides storage to other machines.""" | ||
3120 | 4620 | |||
3121 | 4621 | objects = StorageManager() | ||
3122 | 4622 | |||
3123 | 4623 | class Meta(DefaultMeta): | ||
3124 | 4624 | proxy = True | ||
3125 | 4625 | |||
3126 | 4626 | def __init__(self, *args, **kwargs): | ||
3127 | 4627 | super(Storage, self).__init__( | ||
3128 | 4628 | node_type=NODE_TYPE.STORAGE, *args, **kwargs) | ||
3129 | 4629 | |||
3130 | 4630 | def clean_architecture(self, prev): | ||
3131 | 4631 | # Storage aren't required to have a defined architecture | ||
3132 | 4632 | pass | ||
3133 | 4633 | |||
3134 | 4634 | def clean_hostname_domain(self, prev): | ||
3135 | 4635 | # Storage is never in a domain. | ||
3136 | 4636 | if self.hostname.find('.') > -1: | ||
3137 | 4637 | # They have specified an FQDN. Split up the pieces, and throw | ||
3138 | 4638 | # away the rest. | ||
3139 | 4639 | self.hostname, _ = self.hostname.split('.', 1) | ||
3140 | 4640 | self.domain = None | ||
3141 | 4641 | |||
3142 | 4642 | |||
3143 | 4522 | class NodeGroupToRackController(CleanSave, Model): | 4643 | class NodeGroupToRackController(CleanSave, Model): |
3144 | 4523 | """Store some of the old NodeGroup data so we can migrate it when a rack | 4644 | """Store some of the old NodeGroup data so we can migrate it when a rack |
3145 | 4524 | controller is registered. | 4645 | controller is registered. |
3146 | 4525 | 4646 | ||
3147 | === modified file 'src/maasserver/models/signals/nodes.py' | |||
3148 | --- src/maasserver/models/signals/nodes.py 2016-08-16 09:31:16 +0000 | |||
3149 | +++ src/maasserver/models/signals/nodes.py 2016-12-07 15:50:52 +0000 | |||
3150 | @@ -12,8 +12,12 @@ | |||
3151 | 12 | post_save, | 12 | post_save, |
3152 | 13 | pre_save, | 13 | pre_save, |
3153 | 14 | ) | 14 | ) |
3155 | 15 | from maasserver.enum import NODE_STATUS | 15 | from maasserver.enum import ( |
3156 | 16 | NODE_STATUS, | ||
3157 | 17 | NODE_TYPE, | ||
3158 | 18 | ) | ||
3159 | 16 | from maasserver.models import ( | 19 | from maasserver.models import ( |
3160 | 20 | ChassisHints, | ||
3161 | 17 | Controller, | 21 | Controller, |
3162 | 18 | Device, | 22 | Device, |
3163 | 19 | Machine, | 23 | Machine, |
3164 | @@ -105,5 +109,23 @@ | |||
3165 | 105 | sender=klass) | 109 | sender=klass) |
3166 | 106 | 110 | ||
3167 | 107 | 111 | ||
3168 | 112 | def create_chassis_hints(sender, instance, created, **kwargs): | ||
3169 | 113 | """Create `ChassisHints` when `Chassis` is created.""" | ||
3170 | 114 | try: | ||
3171 | 115 | chassis_hints = instance.chassis_hints | ||
3172 | 116 | except ChassisHints.DoesNotExist: | ||
3173 | 117 | chassis_hints = None | ||
3174 | 118 | if instance.node_type == NODE_TYPE.CHASSIS: | ||
3175 | 119 | if chassis_hints is None: | ||
3176 | 120 | ChassisHints.objects.create(chassis=instance) | ||
3177 | 121 | elif chassis_hints is not None: | ||
3178 | 122 | chassis_hints.delete() | ||
3179 | 123 | |||
3180 | 124 | for klass in NODE_CLASSES: | ||
3181 | 125 | signals.watch( | ||
3182 | 126 | post_save, create_chassis_hints, | ||
3183 | 127 | sender=klass) | ||
3184 | 128 | |||
3185 | 129 | |||
3186 | 108 | # Enable all signals by default. | 130 | # Enable all signals by default. |
3187 | 109 | signals.enable() | 131 | signals.enable() |
3188 | 110 | 132 | ||
3189 | === modified file 'src/maasserver/models/signals/tests/test_nodes.py' | |||
3190 | --- src/maasserver/models/signals/tests/test_nodes.py 2016-09-07 14:23:05 +0000 | |||
3191 | +++ src/maasserver/models/signals/tests/test_nodes.py 2016-12-07 15:50:52 +0000 | |||
3192 | @@ -11,6 +11,7 @@ | |||
3193 | 11 | NODE_STATUS, | 11 | NODE_STATUS, |
3194 | 12 | NODE_TYPE, | 12 | NODE_TYPE, |
3195 | 13 | ) | 13 | ) |
3196 | 14 | from maasserver.models import ChassisHints | ||
3197 | 14 | from maasserver.models.service import ( | 15 | from maasserver.models.service import ( |
3198 | 15 | RACK_SERVICES, | 16 | RACK_SERVICES, |
3199 | 16 | REGION_SERVICES, | 17 | REGION_SERVICES, |
3200 | @@ -144,3 +145,27 @@ | |||
3201 | 144 | self.assertThat( | 145 | self.assertThat( |
3202 | 145 | {service.name for service in services}, | 146 | {service.name for service in services}, |
3203 | 146 | Equals(REGION_SERVICES)) | 147 | Equals(REGION_SERVICES)) |
3204 | 148 | |||
3205 | 149 | |||
3206 | 150 | class TestCreateChassisHints(MAASServerTestCase): | ||
3207 | 151 | |||
3208 | 152 | def test_creates_hints_for_chassis(self): | ||
3209 | 153 | chassis = factory.make_Node(node_type=NODE_TYPE.CHASSIS) | ||
3210 | 154 | self.assertIsNotNone(chassis.chassis_hints) | ||
3211 | 155 | |||
3212 | 156 | def test_creates_hints_device_converted_to_chassis(self): | ||
3213 | 157 | device = factory.make_Device() | ||
3214 | 158 | device.node_type = NODE_TYPE.CHASSIS | ||
3215 | 159 | device.save() | ||
3216 | 160 | self.assertIsNotNone(device.chassis_hints) | ||
3217 | 161 | |||
3218 | 162 | def test_deletes_hints_when_chassis_converted_to_device(self): | ||
3219 | 163 | chassis = factory.make_Node(node_type=NODE_TYPE.CHASSIS) | ||
3220 | 164 | chassis.node_type = NODE_TYPE.DEVICE | ||
3221 | 165 | chassis.save() | ||
3222 | 166 | error = None | ||
3223 | 167 | try: | ||
3224 | 168 | reload_object(chassis).chassis_hints | ||
3225 | 169 | except ChassisHints.DoesNotExist as exc: | ||
3226 | 170 | error = exc | ||
3227 | 171 | self.assertIsNotNone(error) | ||
3228 | 147 | 172 | ||
3229 | === modified file 'src/maasserver/models/staticipaddress.py' | |||
3230 | --- src/maasserver/models/staticipaddress.py 2016-11-17 18:53:03 +0000 | |||
3231 | +++ src/maasserver/models/staticipaddress.py 2016-12-07 15:50:52 +0000 | |||
3232 | @@ -50,19 +50,10 @@ | |||
3233 | 50 | from maasserver.models.domain import Domain | 50 | from maasserver.models.domain import Domain |
3234 | 51 | from maasserver.models.subnet import Subnet | 51 | from maasserver.models.subnet import Subnet |
3235 | 52 | from maasserver.models.timestampedmodel import TimestampedModel | 52 | from maasserver.models.timestampedmodel import TimestampedModel |
3236 | 53 | from maasserver.utils import orm | ||
3237 | 53 | from maasserver.utils.dns import get_ip_based_hostname | 54 | from maasserver.utils.dns import get_ip_based_hostname |
3238 | 54 | from maasserver.utils.orm import ( | ||
3239 | 55 | request_transaction_retry, | ||
3240 | 56 | transactional, | ||
3241 | 57 | ) | ||
3242 | 58 | from maasserver.utils.threads import deferToDatabase | ||
3243 | 59 | from netaddr import IPAddress | 55 | from netaddr import IPAddress |
3244 | 60 | from provisioningserver.logger import get_maas_logger | ||
3245 | 61 | from provisioningserver.utils.enum import map_enum_reverse | 56 | from provisioningserver.utils.enum import map_enum_reverse |
3246 | 62 | from provisioningserver.utils.twisted import asynchronous | ||
3247 | 63 | |||
3248 | 64 | |||
3249 | 65 | maaslog = get_maas_logger("node") | ||
3250 | 66 | 57 | ||
3251 | 67 | 58 | ||
3252 | 68 | class HostnameIPMapping: | 59 | class HostnameIPMapping: |
3253 | @@ -144,15 +135,13 @@ | |||
3254 | 144 | :return: `StaticIPAddress` if successful. | 135 | :return: `StaticIPAddress` if successful. |
3255 | 145 | :raise StaticIPAddressUnavailable: if the address was already taken. | 136 | :raise StaticIPAddressUnavailable: if the address was already taken. |
3256 | 146 | """ | 137 | """ |
3261 | 147 | ipaddress = StaticIPAddress( | 138 | ipaddress = StaticIPAddress(alloc_type=alloc_type, subnet=subnet) |
3258 | 148 | ip=requested_address.format(), alloc_type=alloc_type, | ||
3259 | 149 | subnet=subnet) | ||
3260 | 150 | ipaddress.set_ip_address(requested_address.format()) | ||
3262 | 151 | try: | 139 | try: |
3263 | 152 | # Try to save this address to the database. Do this in a nested | 140 | # Try to save this address to the database. Do this in a nested |
3264 | 153 | # transaction so that we can continue using the outer transaction | 141 | # transaction so that we can continue using the outer transaction |
3265 | 154 | # even if this breaks. | 142 | # even if this breaks. |
3266 | 155 | with transaction.atomic(): | 143 | with transaction.atomic(): |
3267 | 144 | ipaddress.set_ip_address(requested_address.format()) | ||
3268 | 156 | ipaddress.save() | 145 | ipaddress.save() |
3269 | 157 | except IntegrityError: | 146 | except IntegrityError: |
3270 | 158 | # The address is already taken. | 147 | # The address is already taken. |
3271 | @@ -168,6 +157,55 @@ | |||
3272 | 168 | ipaddress.save() | 157 | ipaddress.save() |
3273 | 169 | return ipaddress | 158 | return ipaddress |
3274 | 170 | 159 | ||
3275 | 160 | def _attempt_allocation_of_free_address( | ||
3276 | 161 | self, requested_address, alloc_type, user=None, subnet=None): | ||
3277 | 162 | """Attempt to allocate `requested_address`, which is known to be free. | ||
3278 | 163 | |||
3279 | 164 | It is known to be free *in this transaction*, so this could still | ||
3280 | 165 | fail. If it does fail because of a `UNIQUE_VIOLATION` it will request | ||
3281 | 166 | a retry, except while holding an addition lock. This is not perfect: | ||
3282 | 167 | other threads could jump in before acquiring the lock and steal an | ||
3283 | 168 | apparently free address. However, in stampede situations this appears | ||
3284 | 169 | to be effective enough. Experiment by increasing the `count` parameter | ||
3285 | 170 | in `test_allocate_new_works_under_extreme_concurrency`. | ||
3286 | 171 | |||
3287 | 172 | This method shares a lot in common with `_attempt_allocation` so check | ||
3288 | 173 | out its documentation for more details. | ||
3289 | 174 | |||
3290 | 175 | :param requested_address: The address to be allocated. | ||
3291 | 176 | :typr requested_address: IPAddress | ||
3292 | 177 | :param alloc_type: Allocation type. | ||
3293 | 178 | :param user: Optional user. | ||
3294 | 179 | :return: `StaticIPAddress` if successful. | ||
3295 | 180 | :raise RetryTransaction: if the address was already taken. | ||
3296 | 181 | """ | ||
3297 | 182 | ipaddress = StaticIPAddress(alloc_type=alloc_type, subnet=subnet) | ||
3298 | 183 | try: | ||
3299 | 184 | # Try to save this address to the database. Do this in a nested | ||
3300 | 185 | # transaction so that we can continue using the outer transaction | ||
3301 | 186 | # even if this breaks. | ||
3302 | 187 | with orm.savepoint(): | ||
3303 | 188 | ipaddress.set_ip_address(requested_address.format()) | ||
3304 | 189 | ipaddress.save() | ||
3305 | 190 | except IntegrityError as error: | ||
3306 | 191 | if orm.is_unique_violation(error): | ||
3307 | 192 | # The address is taken. We could allow the transaction retry | ||
3308 | 193 | # machinery to take care of this, but instead we'll ask it to | ||
3309 | 194 | # retry with the `address_allocation` lock. We can't take it | ||
3310 | 195 | # here because we're already in a transaction; we need to exit | ||
3311 | 196 | # the transaction, take the lock, and only then try again. | ||
3312 | 197 | orm.request_transaction_retry(locks.address_allocation) | ||
3313 | 198 | else: | ||
3314 | 199 | raise | ||
3315 | 200 | else: | ||
3316 | 201 | # We deliberately do *not* save the user until now because it | ||
3317 | 202 | # might result in an IntegrityError, and we rely on the latter | ||
3318 | 203 | # in the code above to indicate an already allocated IP | ||
3319 | 204 | # address and nothing else. | ||
3320 | 205 | ipaddress.user = user | ||
3321 | 206 | ipaddress.save() | ||
3322 | 207 | return ipaddress | ||
3323 | 208 | |||
3324 | 171 | def allocate_new( | 209 | def allocate_new( |
3325 | 172 | self, subnet=None, alloc_type=IPADDRESS_TYPE.AUTO, user=None, | 210 | self, subnet=None, alloc_type=IPADDRESS_TYPE.AUTO, user=None, |
3326 | 173 | requested_address=None, exclude_addresses=[]): | 211 | requested_address=None, exclude_addresses=[]): |
3327 | @@ -185,9 +223,6 @@ | |||
3328 | 185 | :param exclude_addresses: A list of addresses which MUST NOT be used. | 223 | :param exclude_addresses: A list of addresses which MUST NOT be used. |
3329 | 186 | 224 | ||
3330 | 187 | All IP parameters can be strings or netaddr.IPAddress. | 225 | All IP parameters can be strings or netaddr.IPAddress. |
3331 | 188 | |||
3332 | 189 | Note that this method has been designed to work even when the database | ||
3333 | 190 | is running with READ COMMITTED isolation. Try to keep it that way. | ||
3334 | 191 | """ | 226 | """ |
3335 | 192 | # This check for `alloc_type` is important for later on. We rely on | 227 | # This check for `alloc_type` is important for later on. We rely on |
3336 | 193 | # detecting IntegrityError as a sign than an IP address is already | 228 | # detecting IntegrityError as a sign than an IP address is already |
3337 | @@ -203,21 +238,15 @@ | |||
3338 | 203 | "Could not find an appropriate subnet.") | 238 | "Could not find an appropriate subnet.") |
3339 | 204 | 239 | ||
3340 | 205 | if requested_address is None: | 240 | if requested_address is None: |
3352 | 206 | with locks.staticip_acquire: | 241 | requested_address = subnet.get_next_ip_for_allocation( |
3353 | 207 | requested_address = self._async_find_free_ip( | 242 | exclude_addresses=exclude_addresses) |
3354 | 208 | subnet, exclude_addresses=exclude_addresses).wait(30) | 243 | return self._attempt_allocation_of_free_address( |
3355 | 209 | try: | 244 | requested_address, alloc_type, user=user, subnet=subnet) |
3345 | 210 | return self._attempt_allocation( | ||
3346 | 211 | requested_address, alloc_type, user, | ||
3347 | 212 | subnet=subnet) | ||
3348 | 213 | except StaticIPAddressUnavailable: | ||
3349 | 214 | # We lost the race: another transaction has taken this IP | ||
3350 | 215 | # address. Retry this transaction from the top. | ||
3351 | 216 | request_transaction_retry() | ||
3356 | 217 | else: | 245 | else: |
3357 | 218 | requested_address = IPAddress(requested_address) | 246 | requested_address = IPAddress(requested_address) |
3358 | 219 | subnet.validate_static_ip(requested_address) | 247 | subnet.validate_static_ip(requested_address) |
3359 | 220 | return self._attempt_allocation( | 248 | return self._attempt_allocation( |
3360 | 249 | <<<<<<< TREE | ||
3361 | 221 | requested_address, alloc_type, | 250 | requested_address, alloc_type, |
3362 | 222 | user=user, subnet=subnet) | 251 | user=user, subnet=subnet) |
3363 | 223 | 252 | ||
3364 | @@ -375,6 +404,156 @@ | |||
3365 | 375 | 404 | ||
3366 | 376 | def _find_free_ip(self, subnet, exclude_addresses=None): | 405 | def _find_free_ip(self, subnet, exclude_addresses=None): |
3367 | 377 | return subnet.get_next_ip_for_allocation(exclude_addresses) | 406 | return subnet.get_next_ip_for_allocation(exclude_addresses) |
3368 | 407 | ======= | ||
3369 | 408 | requested_address, alloc_type, user=user, subnet=subnet) | ||
3370 | 409 | |||
3371 | 410 | def _get_special_mappings(self, domain, raw_ttl=False): | ||
3372 | 411 | """Get the special mappings, possibly limited to a single Domain. | ||
3373 | 412 | |||
3374 | 413 | This function is responsible for creating these mappings: | ||
3375 | 414 | - any USER_RESERVED IP, | ||
3376 | 415 | - any IP not associated with a Node, | ||
3377 | 416 | - any IP associated with a DNSResource. | ||
3378 | 417 | The caller is responsible for addresses otherwise derived from nodes. | ||
3379 | 418 | |||
3380 | 419 | Because of how the get hostname_ip_mapping code works, we actually need | ||
3381 | 420 | to fetch ALL of the entries for subnets, but forward mappings need to | ||
3382 | 421 | be domain-specific. | ||
3383 | 422 | |||
3384 | 423 | :param domain: limit return to just the given Domain. If anything | ||
3385 | 424 | other than a Domain is passed in (e.g., a Subnet or None), we | ||
3386 | 425 | return all of the reverse mappings. | ||
3387 | 426 | :param raw_ttl: Boolean, if True then just return the address_ttl, | ||
3388 | 427 | otherwise, coalesce the address_ttl to be the correct answer for | ||
3389 | 428 | zone generation. | ||
3390 | 429 | :return: a (default) dict of hostname: HostnameIPMapping entries. | ||
3391 | 430 | """ | ||
3392 | 431 | default_ttl = "%d" % Config.objects.get_config('default_dns_ttl') | ||
3393 | 432 | if isinstance(domain, Domain): | ||
3394 | 433 | # Domains are special in that we only want to have entries for the | ||
3395 | 434 | # domain that we were asked about. And they can possibly come from | ||
3396 | 435 | # either the child or the parent for glue. | ||
3397 | 436 | where_clause = """ | ||
3398 | 437 | AND ( | ||
3399 | 438 | dnsrr.dom2_id = %s OR | ||
3400 | 439 | node.dom2_id = %s OR | ||
3401 | 440 | dnsrr.domain_id = %s OR | ||
3402 | 441 | node.domain_id = %s | ||
3403 | 442 | """ | ||
3404 | 443 | query_parms = [domain.id, domain.id, domain.id, domain.id] | ||
3405 | 444 | # And the default domain is extra special, since it needs to have | ||
3406 | 445 | # A/AAAA RRs for any USER_RESERVED addresses that have no name | ||
3407 | 446 | # otherwise attached to them. | ||
3408 | 447 | if domain.is_default(): | ||
3409 | 448 | where_clause += """ OR ( | ||
3410 | 449 | dnsrr.fqdn IS NULL AND | ||
3411 | 450 | node.fqdn IS NULL) | ||
3412 | 451 | """ | ||
3413 | 452 | where_clause += ")" | ||
3414 | 453 | else: | ||
3415 | 454 | # There is nothing special about the query for subnets. | ||
3416 | 455 | domain = None | ||
3417 | 456 | where_clause = "" | ||
3418 | 457 | query_parms = [] | ||
3419 | 458 | # raw_ttl says that we don't coalesce, but we need to pick one, so we | ||
3420 | 459 | # go with DNSResource if it is involved. | ||
3421 | 460 | if raw_ttl: | ||
3422 | 461 | ttl_clause = """COALESCE(dnsrr.address_ttl, node.address_ttl)""" | ||
3423 | 462 | else: | ||
3424 | 463 | ttl_clause = """ | ||
3425 | 464 | COALESCE( | ||
3426 | 465 | dnsrr.address_ttl, | ||
3427 | 466 | dnsrr.ttl, | ||
3428 | 467 | node.address_ttl, | ||
3429 | 468 | node.ttl, | ||
3430 | 469 | %s)""" % default_ttl | ||
3431 | 470 | # And here is the SQL query of doom. Build up inner selects to get the | ||
3432 | 471 | # view of a DNSResource (and Node) that we need, and finally use | ||
3433 | 472 | # domain2 to handle the case where an FQDN is also the name of a domain | ||
3434 | 473 | # that we know. | ||
3435 | 474 | sql_query = """ | ||
3436 | 475 | SELECT | ||
3437 | 476 | COALESCE(dnsrr.fqdn, node.fqdn) AS fqdn, | ||
3438 | 477 | node.system_id, | ||
3439 | 478 | node.node_type, | ||
3440 | 479 | """ + ttl_clause + """ AS ttl, | ||
3441 | 480 | staticip.ip | ||
3442 | 481 | FROM | ||
3443 | 482 | maasserver_staticipaddress AS staticip | ||
3444 | 483 | LEFT JOIN ( | ||
3445 | 484 | /* Create a dnsrr that has what we need. */ | ||
3446 | 485 | SELECT | ||
3447 | 486 | CASE WHEN dnsrr.name = '@' THEN | ||
3448 | 487 | dom.name | ||
3449 | 488 | ELSE | ||
3450 | 489 | CONCAT(dnsrr.name, '.', dom.name) | ||
3451 | 490 | END AS fqdn, | ||
3452 | 491 | dom.name as dom_name, | ||
3453 | 492 | dnsrr.domain_id, | ||
3454 | 493 | dnsrr.address_ttl, | ||
3455 | 494 | dom.ttl, | ||
3456 | 495 | dia.staticipaddress_id AS dnsrr_sip_id, | ||
3457 | 496 | dom2.id AS dom2_id | ||
3458 | 497 | FROM maasserver_dnsresource_ip_addresses AS dia | ||
3459 | 498 | JOIN maasserver_dnsresource AS dnsrr ON | ||
3460 | 499 | dia.dnsresource_id = dnsrr.id | ||
3461 | 500 | JOIN maasserver_domain AS dom ON | ||
3462 | 501 | dnsrr.domain_id = dom.id | ||
3463 | 502 | LEFT JOIN maasserver_domain AS dom2 ON | ||
3464 | 503 | CONCAT(dnsrr.name, '.', dom.name) = dom2.name OR ( | ||
3465 | 504 | dnsrr.name = '@' AND | ||
3466 | 505 | dom.name SIMILAR TO CONCAT('[-A-Za-z0-9]*.', dom2.name) | ||
3467 | 506 | ) | ||
3468 | 507 | ) AS dnsrr ON | ||
3469 | 508 | dnsrr_sip_id = staticip.id | ||
3470 | 509 | LEFT JOIN ( | ||
3471 | 510 | /* Create a node that has what we need. */ | ||
3472 | 511 | SELECT | ||
3473 | 512 | CONCAT(nd.hostname, '.', dom.name) AS fqdn, | ||
3474 | 513 | dom.name as dom_name, | ||
3475 | 514 | nd.system_id, | ||
3476 | 515 | nd.node_type, | ||
3477 | 516 | nd.domain_id, | ||
3478 | 517 | nd.address_ttl, | ||
3479 | 518 | dom.ttl, | ||
3480 | 519 | iia.staticipaddress_id AS node_sip_id, | ||
3481 | 520 | dom2.id AS dom2_id | ||
3482 | 521 | FROM maasserver_interface_ip_addresses AS iia | ||
3483 | 522 | JOIN maasserver_interface AS iface ON | ||
3484 | 523 | iia.interface_id = iface.id | ||
3485 | 524 | JOIN maasserver_node AS nd ON | ||
3486 | 525 | iface.node_id = nd.id | ||
3487 | 526 | JOIN maasserver_domain AS dom ON | ||
3488 | 527 | nd.domain_id = dom.id | ||
3489 | 528 | LEFT JOIN maasserver_domain AS dom2 ON | ||
3490 | 529 | CONCAT(nd.hostname, '.', dom.name) = dom2.name | ||
3491 | 530 | ) AS node ON | ||
3492 | 531 | node_sip_id = staticip.id | ||
3493 | 532 | WHERE | ||
3494 | 533 | staticip.ip IS NOT NULL AND | ||
3495 | 534 | host(staticip.ip) != '' AND | ||
3496 | 535 | ( | ||
3497 | 536 | staticip.alloc_type = %s OR | ||
3498 | 537 | node.fqdn IS NULL OR | ||
3499 | 538 | dnsrr IS NOT NULL | ||
3500 | 539 | )""" + where_clause + """ | ||
3501 | 540 | """ | ||
3502 | 541 | default_domain = Domain.objects.get_default_domain() | ||
3503 | 542 | mapping = defaultdict(HostnameIPMapping) | ||
3504 | 543 | cursor = connection.cursor() | ||
3505 | 544 | query_parms = [IPADDRESS_TYPE.USER_RESERVED] + query_parms | ||
3506 | 545 | cursor.execute(sql_query, query_parms) | ||
3507 | 546 | for (fqdn, system_id, node_type, ttl, | ||
3508 | 547 | ip) in cursor.fetchall(): | ||
3509 | 548 | if fqdn is None or fqdn == '': | ||
3510 | 549 | fqdn = "%s.%s" % ( | ||
3511 | 550 | get_ip_based_hostname(ip), default_domain.name) | ||
3512 | 551 | mapping[fqdn].node_type = node_type | ||
3513 | 552 | mapping[fqdn].system_id = system_id | ||
3514 | 553 | mapping[fqdn].ttl = ttl | ||
3515 | 554 | mapping[fqdn].ips.add(ip) | ||
3516 | 555 | return mapping | ||
3517 | 556 | >>>>>>> MERGE-SOURCE | ||
3518 | 378 | 557 | ||
3519 | 379 | def get_hostname_ip_mapping(self, domain_or_subnet, raw_ttl=False): | 558 | def get_hostname_ip_mapping(self, domain_or_subnet, raw_ttl=False): |
3520 | 380 | """Return hostname mappings for `StaticIPAddress` entries. | 559 | """Return hostname mappings for `StaticIPAddress` entries. |
3521 | 381 | 560 | ||
3522 | === modified file 'src/maasserver/models/subnet.py' | |||
3523 | --- src/maasserver/models/subnet.py 2016-10-18 16:48:13 +0000 | |||
3524 | +++ src/maasserver/models/subnet.py 2016-12-07 15:50:52 +0000 | |||
3525 | @@ -59,6 +59,7 @@ | |||
3526 | 59 | ) | 59 | ) |
3527 | 60 | from provisioningserver.logger import get_maas_logger | 60 | from provisioningserver.logger import get_maas_logger |
3528 | 61 | from provisioningserver.utils.network import ( | 61 | from provisioningserver.utils.network import ( |
3529 | 62 | IPRANGE_TYPE as MAASIPRANGE_TYPE, | ||
3530 | 62 | MAASIPSet, | 63 | MAASIPSet, |
3531 | 63 | make_ipaddress, | 64 | make_ipaddress, |
3532 | 64 | make_iprange, | 65 | make_iprange, |
3533 | @@ -386,6 +387,9 @@ | |||
3534 | 386 | active_discovery = BooleanField( | 387 | active_discovery = BooleanField( |
3535 | 387 | editable=True, blank=False, null=False, default=False) | 388 | editable=True, blank=False, null=False, default=False) |
3536 | 388 | 389 | ||
3537 | 390 | managed = BooleanField( | ||
3538 | 391 | editable=True, blank=False, null=False, default=True) | ||
3539 | 392 | |||
3540 | 389 | @property | 393 | @property |
3541 | 390 | def label(self): | 394 | def label(self): |
3542 | 391 | """Returns a human-friendly label for this subnet.""" | 395 | """Returns a human-friendly label for this subnet.""" |
3543 | @@ -474,7 +478,8 @@ | |||
3544 | 474 | 478 | ||
3545 | 475 | def get_ipranges_in_use( | 479 | def get_ipranges_in_use( |
3546 | 476 | self, exclude_addresses: IPAddressExcludeList=None, | 480 | self, exclude_addresses: IPAddressExcludeList=None, |
3548 | 477 | ranges_only: bool=False, | 481 | ranges_only: bool=False, include_reserved: bool=True, |
3549 | 482 | with_neighbours: bool=False, | ||
3550 | 478 | ignore_discovered_ips: bool=False) -> MAASIPSet: | 483 | ignore_discovered_ips: bool=False) -> MAASIPSet: |
3551 | 479 | """Returns a `MAASIPSet` of `MAASIPRange` objects which are currently | 484 | """Returns a `MAASIPSet` of `MAASIPRange` objects which are currently |
3552 | 480 | in use on this `Subnet`. | 485 | in use on this `Subnet`. |
3553 | @@ -483,6 +488,8 @@ | |||
3554 | 483 | :param ignore_discovered_ips: DISCOVERED addresses are not "in use". | 488 | :param ignore_discovered_ips: DISCOVERED addresses are not "in use". |
3555 | 484 | :param ranges_only: if True, filters out gateway IPs, static routes, | 489 | :param ranges_only: if True, filters out gateway IPs, static routes, |
3556 | 485 | DNS servers, and `exclude_addresses`. | 490 | DNS servers, and `exclude_addresses`. |
3557 | 491 | :param with_neighbours: If True, includes addresses learned from | ||
3558 | 492 | neighbour observation. | ||
3559 | 486 | """ | 493 | """ |
3560 | 487 | if exclude_addresses is None: | 494 | if exclude_addresses is None: |
3561 | 488 | exclude_addresses = [] | 495 | exclude_addresses = [] |
3562 | @@ -531,8 +538,11 @@ | |||
3563 | 531 | for address in exclude_addresses | 538 | for address in exclude_addresses |
3564 | 532 | if address in network | 539 | if address in network |
3565 | 533 | ) | 540 | ) |
3567 | 534 | ranges |= self.get_reserved_maasipset() | 541 | if include_reserved: |
3568 | 542 | ranges |= self.get_reserved_maasipset() | ||
3569 | 535 | ranges |= self.get_dynamic_maasipset() | 543 | ranges |= self.get_dynamic_maasipset() |
3570 | 544 | if with_neighbours: | ||
3571 | 545 | ranges |= self.get_maasipset_for_neighbours() | ||
3572 | 536 | return MAASIPSet(ranges) | 546 | return MAASIPSet(ranges) |
3573 | 537 | 547 | ||
3574 | 538 | def get_ipranges_available_for_reserved_range(self): | 548 | def get_ipranges_available_for_reserved_range(self): |
3575 | @@ -549,19 +559,71 @@ | |||
3576 | 549 | """Returns a `MAASIPSet` of ranges which are currently free on this | 559 | """Returns a `MAASIPSet` of ranges which are currently free on this |
3577 | 550 | `Subnet`. | 560 | `Subnet`. |
3578 | 551 | 561 | ||
3579 | 562 | :param ranges_only: if True, filters out gateway IPs, static routes, | ||
3580 | 563 | DNS servers, and `exclude_addresses`. | ||
3581 | 552 | :param exclude_addresses: An iterable of addresses not to use. | 564 | :param exclude_addresses: An iterable of addresses not to use. |
3582 | 553 | :param ignore_discovered_ips: DISCOVERED addresses are not "in use". | 565 | :param ignore_discovered_ips: DISCOVERED addresses are not "in use". |
3583 | 566 | :param with_neighbours: If True, includes addresses learned from | ||
3584 | 567 | neighbour observation. | ||
3585 | 554 | """ | 568 | """ |
3586 | 555 | if exclude_addresses is None: | 569 | if exclude_addresses is None: |
3587 | 556 | exclude_addresses = [] | 570 | exclude_addresses = [] |
3589 | 557 | ranges = self.get_ipranges_in_use( | 571 | in_use = self.get_ipranges_in_use( |
3590 | 558 | exclude_addresses=exclude_addresses, | 572 | exclude_addresses=exclude_addresses, |
3591 | 559 | ranges_only=ranges_only, | 573 | ranges_only=ranges_only, |
3592 | 574 | with_neighbours=with_neighbours, | ||
3593 | 560 | ignore_discovered_ips=ignore_discovered_ips) | 575 | ignore_discovered_ips=ignore_discovered_ips) |
3598 | 561 | if with_neighbours: | 576 | if self.managed or ranges_only: |
3599 | 562 | ranges |= self.get_maasipset_for_neighbours() | 577 | not_in_use = in_use.get_unused_ranges(self.get_ipnetwork()) |
3600 | 563 | unused = ranges.get_unused_ranges(self.get_ipnetwork()) | 578 | else: |
3601 | 564 | return unused | 579 | # The end result we want is a list of unused IP addresses *within* |
3602 | 580 | # reserved ranges. To get that result, we first need the full list | ||
3603 | 581 | # of unused IP addresses on the subnet. This is better illustrated | ||
3604 | 582 | # visually below. | ||
3605 | 583 | # | ||
3606 | 584 | # Legend: | ||
3607 | 585 | # X: in-use IP addresses | ||
3608 | 586 | # R: reserved range | ||
3609 | 587 | # Rx: reserved range (with allocated, in-use IP address) | ||
3610 | 588 | # | ||
3611 | 589 | # +----+----+----+----+----+----+ | ||
3612 | 590 | # IP address: | 1 | 2 | 3 | 4 | 5 | 6 | | ||
3613 | 591 | # +----+----+----+----+----+----+ | ||
3614 | 592 | # Usages: | X | | R | Rx | | X | | ||
3615 | 593 | # +----+----+----+----+----+----+ | ||
3616 | 594 | # | ||
3617 | 595 | # We need a set that just contains `3` in this case. To get there, | ||
3618 | 596 | # first calculate the set of all unused addresses on the subnet, | ||
3619 | 597 | # then intersect that set with set of in-use addresses *excluding* | ||
3620 | 598 | # the reserved range, then calculate which addresses within *that* | ||
3621 | 599 | # set are unused: | ||
3622 | 600 | # +----+----+----+----+----+----+ | ||
3623 | 601 | # IP address: | 1 | 2 | 3 | 4 | 5 | 6 | | ||
3624 | 602 | # +----+----+----+----+----+----+ | ||
3625 | 603 | # unused: | | U | | | U | | | ||
3626 | 604 | # +----+----+----+----+----+----+ | ||
3627 | 605 | # unmanaged_in_use: | u | | | u | | u | | ||
3628 | 606 | # +----+----+----+----+----+----+ | ||
3629 | 607 | # |= unmanaged: =============================== | ||
3630 | 608 | # +----+----+----+----+----+----+ | ||
3631 | 609 | # unmanaged_in_use: | u | U | | u | U | u | | ||
3632 | 610 | # +----+----+----+----+----+----+ | ||
3633 | 611 | # get_unused_ranges(): =============================== | ||
3634 | 612 | # +----+----+----+----+----+----+ | ||
3635 | 613 | # not_in_use: | | | n | | | | | ||
3636 | 614 | # +----+----+----+----+----+----+ | ||
3637 | 615 | unused = in_use.get_unused_ranges( | ||
3638 | 616 | self.get_ipnetwork(), purpose=MAASIPRANGE_TYPE.UNMANAGED) | ||
3639 | 617 | unmanaged_in_use = self.get_ipranges_in_use( | ||
3640 | 618 | exclude_addresses=exclude_addresses, | ||
3641 | 619 | ranges_only=ranges_only, | ||
3642 | 620 | include_reserved=False, | ||
3643 | 621 | with_neighbours=with_neighbours, | ||
3644 | 622 | ignore_discovered_ips=ignore_discovered_ips) | ||
3645 | 623 | unmanaged_in_use |= unused | ||
3646 | 624 | not_in_use = unmanaged_in_use.get_unused_ranges( | ||
3647 | 625 | self.get_ipnetwork(), purpose=MAASIPRANGE_TYPE.UNUSED) | ||
3648 | 626 | return not_in_use | ||
3649 | 565 | 627 | ||
3650 | 566 | def get_maasipset_for_neighbours(self) -> MAASIPSet: | 628 | def get_maasipset_for_neighbours(self) -> MAASIPSet: |
3651 | 567 | """Return the observed neighbours in this subnet. | 629 | """Return the observed neighbours in this subnet. |
3652 | 568 | 630 | ||
3653 | === modified file 'src/maasserver/models/tests/test_discovery.py' | |||
3654 | --- src/maasserver/models/tests/test_discovery.py 2016-11-02 20:18:07 +0000 | |||
3655 | +++ src/maasserver/models/tests/test_discovery.py 2016-12-07 15:50:52 +0000 | |||
3656 | @@ -16,6 +16,7 @@ | |||
3657 | 16 | from maasserver.testing.testcase import MAASServerTestCase | 16 | from maasserver.testing.testcase import MAASServerTestCase |
3658 | 17 | from maastesting.matchers import ( | 17 | from maastesting.matchers import ( |
3659 | 18 | DocTestMatches, | 18 | DocTestMatches, |
3660 | 19 | IsNonEmptyString, | ||
3661 | 19 | Matches, | 20 | Matches, |
3662 | 20 | MockCalledOnceWith, | 21 | MockCalledOnceWith, |
3663 | 21 | MockNotCalled, | 22 | MockNotCalled, |
3664 | @@ -34,7 +35,7 @@ | |||
3665 | 34 | 35 | ||
3666 | 35 | def test_mac_organization(self): | 36 | def test_mac_organization(self): |
3667 | 36 | discovery = factory.make_Discovery(mac_address="48:51:b7:00:00:00") | 37 | discovery = factory.make_Discovery(mac_address="48:51:b7:00:00:00") |
3669 | 37 | self.assertThat(discovery.mac_organization, Equals("Intel Corporate")) | 38 | self.assertThat(discovery.mac_organization, IsNonEmptyString) |
3670 | 38 | 39 | ||
3671 | 39 | def test__ignores_duplicate_macs(self): | 40 | def test__ignores_duplicate_macs(self): |
3672 | 40 | rack1 = factory.make_RackController() | 41 | rack1 = factory.make_RackController() |
3673 | 41 | 42 | ||
3674 | === modified file 'src/maasserver/models/tests/test_event.py' | |||
3675 | --- src/maasserver/models/tests/test_event.py 2015-12-01 18:12:59 +0000 | |||
3676 | +++ src/maasserver/models/tests/test_event.py 2016-12-07 15:50:52 +0000 | |||
3677 | @@ -11,6 +11,7 @@ | |||
3678 | 11 | from django.db import IntegrityError | 11 | from django.db import IntegrityError |
3679 | 12 | from maasserver.models import ( | 12 | from maasserver.models import ( |
3680 | 13 | Event, | 13 | Event, |
3681 | 14 | event as event_module, | ||
3682 | 14 | EventType, | 15 | EventType, |
3683 | 15 | ) | 16 | ) |
3684 | 16 | from maasserver.testing.factory import factory | 17 | from maasserver.testing.factory import factory |
3685 | @@ -84,6 +85,15 @@ | |||
3686 | 84 | self.assertIsNotNone(EventType.objects.get(name=event_type)) | 85 | self.assertIsNotNone(EventType.objects.get(name=event_type)) |
3687 | 85 | self.assertIsNotNone(Event.objects.get(node=node)) | 86 | self.assertIsNotNone(Event.objects.get(node=node)) |
3688 | 86 | 87 | ||
3689 | 88 | def test_create_region_event_creates_region_event(self): | ||
3690 | 89 | region = factory.make_RegionRackController() | ||
3691 | 90 | self.patch(event_module, 'get_maas_id').return_value = region.system_id | ||
3692 | 91 | Event.objects.create_region_event( | ||
3693 | 92 | event_type=EVENT_TYPES.REGION_IMPORT_ERROR) | ||
3694 | 93 | self.assertIsNotNone( | ||
3695 | 94 | EventType.objects.get(name=EVENT_TYPES.REGION_IMPORT_ERROR)) | ||
3696 | 95 | self.assertIsNotNone(Event.objects.get(node=region)) | ||
3697 | 96 | |||
3698 | 87 | def test_register_event_and_event_type_handles_integrity_errors(self): | 97 | def test_register_event_and_event_type_handles_integrity_errors(self): |
3699 | 88 | # It's possible that two calls to | 98 | # It's possible that two calls to |
3700 | 89 | # register_event_and_event_type() could arrive at more-or-less | 99 | # register_event_and_event_type() could arrive at more-or-less |
3701 | 90 | 100 | ||
3702 | === modified file 'src/maasserver/models/tests/test_neighbour.py' | |||
3703 | --- src/maasserver/models/tests/test_neighbour.py 2016-08-19 11:40:52 +0000 | |||
3704 | +++ src/maasserver/models/tests/test_neighbour.py 2016-12-07 15:50:52 +0000 | |||
3705 | @@ -7,11 +7,11 @@ | |||
3706 | 7 | 7 | ||
3707 | 8 | from maasserver.testing.factory import factory | 8 | from maasserver.testing.factory import factory |
3708 | 9 | from maasserver.testing.testcase import MAASServerTestCase | 9 | from maasserver.testing.testcase import MAASServerTestCase |
3710 | 10 | from testtools.matchers import Equals | 10 | from maastesting.matchers import IsNonEmptyString |
3711 | 11 | 11 | ||
3712 | 12 | 12 | ||
3713 | 13 | class TestNeighbourModel(MAASServerTestCase): | 13 | class TestNeighbourModel(MAASServerTestCase): |
3714 | 14 | 14 | ||
3715 | 15 | def test_mac_organization(self): | 15 | def test_mac_organization(self): |
3716 | 16 | neighbour = factory.make_Neighbour(mac_address="48:51:b7:00:00:00") | 16 | neighbour = factory.make_Neighbour(mac_address="48:51:b7:00:00:00") |
3718 | 17 | self.assertThat(neighbour.mac_organization, Equals("Intel Corporate")) | 17 | self.assertThat(neighbour.mac_organization, IsNonEmptyString) |
3719 | 18 | 18 | ||
3720 | === modified file 'src/maasserver/models/tests/test_node.py' | |||
3721 | --- src/maasserver/models/tests/test_node.py 2016-12-07 11:26:49 +0000 | |||
3722 | +++ src/maasserver/models/tests/test_node.py 2016-12-07 15:50:52 +0000 | |||
3723 | @@ -60,6 +60,7 @@ | |||
3724 | 60 | BondInterface, | 60 | BondInterface, |
3725 | 61 | BootResource, | 61 | BootResource, |
3726 | 62 | BridgeInterface, | 62 | BridgeInterface, |
3727 | 63 | Chassis, | ||
3728 | 63 | Config, | 64 | Config, |
3729 | 64 | Controller, | 65 | Controller, |
3730 | 65 | Device, | 66 | Device, |
3731 | @@ -78,6 +79,7 @@ | |||
3732 | 78 | RegionRackRPCConnection, | 79 | RegionRackRPCConnection, |
3733 | 79 | Service, | 80 | Service, |
3734 | 80 | Space, | 81 | Space, |
3735 | 82 | Storage, | ||
3736 | 81 | Subnet, | 83 | Subnet, |
3737 | 82 | UnknownInterface, | 84 | UnknownInterface, |
3738 | 83 | VLAN, | 85 | VLAN, |
3739 | @@ -95,8 +97,6 @@ | |||
3740 | 95 | GatewayDefinition, | 97 | GatewayDefinition, |
3741 | 96 | generate_node_system_id, | 98 | generate_node_system_id, |
3742 | 97 | PowerInfo, | 99 | PowerInfo, |
3743 | 98 | typecast_node, | ||
3744 | 99 | typecast_to_node_type, | ||
3745 | 100 | ) | 100 | ) |
3746 | 101 | from maasserver.models.signals import power as node_query | 101 | from maasserver.models.signals import power as node_query |
3747 | 102 | from maasserver.models.timestampedmodel import now | 102 | from maasserver.models.timestampedmodel import now |
3748 | @@ -137,6 +137,7 @@ | |||
3749 | 137 | from maasserver.worker_user import get_worker_user | 137 | from maasserver.worker_user import get_worker_user |
3750 | 138 | from maastesting.matchers import ( | 138 | from maastesting.matchers import ( |
3751 | 139 | DocTestMatches, | 139 | DocTestMatches, |
3752 | 140 | IsNonEmptyString, | ||
3753 | 140 | MockCalledOnce, | 141 | MockCalledOnce, |
3754 | 141 | MockCalledOnceWith, | 142 | MockCalledOnceWith, |
3755 | 142 | MockCallsMatch, | 143 | MockCallsMatch, |
3756 | @@ -154,12 +155,11 @@ | |||
3757 | 154 | disk_erasing, | 155 | disk_erasing, |
3758 | 155 | ) | 156 | ) |
3759 | 156 | from netaddr import IPAddress | 157 | from netaddr import IPAddress |
3760 | 158 | from provisioningserver.drivers.power import PowerDriverRegistry | ||
3761 | 157 | from provisioningserver.events import ( | 159 | from provisioningserver.events import ( |
3762 | 158 | EVENT_DETAILS, | 160 | EVENT_DETAILS, |
3763 | 159 | EVENT_TYPES, | 161 | EVENT_TYPES, |
3764 | 160 | ) | 162 | ) |
3765 | 161 | from provisioningserver.power import QUERY_POWER_TYPES | ||
3766 | 162 | from provisioningserver.power.schema import JSON_POWER_TYPE_PARAMETERS | ||
3767 | 163 | from provisioningserver.rpc.cluster import ( | 163 | from provisioningserver.rpc.cluster import ( |
3768 | 164 | AddChassis, | 164 | AddChassis, |
3769 | 165 | DisableAndShutoffRackd, | 165 | DisableAndShutoffRackd, |
3770 | @@ -225,59 +225,86 @@ | |||
3771 | 225 | "... after 1000 iterations ... no unused node identifiers.")) | 225 | "... after 1000 iterations ... no unused node identifiers.")) |
3772 | 226 | 226 | ||
3773 | 227 | 227 | ||
3795 | 228 | class TestTypeCastNode(MAASServerTestCase): | 228 | def HasType(type_): |
3796 | 229 | def test_all_node_types_can_be_casted(self): | 229 | return AfterPreprocessing(type, Is(type_), annotate=False) |
3797 | 230 | node = factory.make_Node() | 230 | |
3798 | 231 | cast_to = random.choice( | 231 | |
3799 | 232 | [Device, Machine, Node, RackController, RegionController]) | 232 | def SharesStorageWith(other): |
3800 | 233 | typecast_node(node, cast_to) | 233 | return AfterPreprocessing( |
3801 | 234 | self.assertIsInstance(node, cast_to) | 234 | (lambda thing: thing.__dict__), Is(other.__dict__), |
3802 | 235 | 235 | annotate=False) | |
3782 | 236 | def test_rejects_casting_to_non_node_type_objects(self): | ||
3783 | 237 | node = factory.make_Node() | ||
3784 | 238 | self.assertRaises(AssertionError, typecast_node, node, object) | ||
3785 | 239 | |||
3786 | 240 | def test_rejects_casting_non_node_type(self): | ||
3787 | 241 | node = object() | ||
3788 | 242 | cast_to = random.choice( | ||
3789 | 243 | [Device, Machine, Node, RackController, RegionController]) | ||
3790 | 244 | self.assertRaises(AssertionError, typecast_node, node, cast_to) | ||
3791 | 245 | |||
3792 | 246 | def test_sets_hostname_if_blank(self): | ||
3793 | 247 | node = factory.make_Node(hostname='') | ||
3794 | 248 | self.assertNotEqual('', node.hostname) | ||
3803 | 249 | 236 | ||
3804 | 250 | 237 | ||
3805 | 251 | class TestTypeCastToNodeType(MAASServerTestCase): | 238 | class TestTypeCastToNodeType(MAASServerTestCase): |
3806 | 239 | |||
3807 | 240 | def test_cast_to_self(self): | ||
3808 | 241 | node = factory.make_Node().as_node() | ||
3809 | 242 | node_types = set(map_enum(NODE_TYPE).values()) | ||
3810 | 243 | casts = { | ||
3811 | 244 | NODE_TYPE.DEVICE: Device, | ||
3812 | 245 | NODE_TYPE.MACHINE: Machine, | ||
3813 | 246 | NODE_TYPE.RACK_CONTROLLER: RackController, | ||
3814 | 247 | NODE_TYPE.REGION_AND_RACK_CONTROLLER: RackController, | ||
3815 | 248 | NODE_TYPE.REGION_CONTROLLER: RegionController, | ||
3816 | 249 | NODE_TYPE.CHASSIS: Chassis, | ||
3817 | 250 | NODE_TYPE.STORAGE: Storage, | ||
3818 | 251 | } | ||
3819 | 252 | self.assertThat(casts.keys(), Equals(node_types)) | ||
3820 | 253 | for node_type, cast_type in casts.items(): | ||
3821 | 254 | node.node_type = node_type | ||
3822 | 255 | node_as_self = node.as_self() | ||
3823 | 256 | self.assertThat(node, HasType(Node)) | ||
3824 | 257 | self.assertThat(node_as_self, HasType(cast_type)) | ||
3825 | 258 | self.assertThat(node_as_self, SharesStorageWith(node)) | ||
3826 | 259 | |||
3827 | 252 | def test_cast_to_machine(self): | 260 | def test_cast_to_machine(self): |
3831 | 253 | node = factory.make_Node(node_type=NODE_TYPE.MACHINE) | 261 | node = factory.make_Node().as_node() |
3832 | 254 | machine = typecast_to_node_type(node) | 262 | machine = node.as_machine() |
3833 | 255 | self.assertIsInstance(machine, Machine) | 263 | self.assertThat(node, HasType(Node)) |
3834 | 264 | self.assertThat(machine, HasType(Machine)) | ||
3835 | 265 | self.assertThat(machine, SharesStorageWith(node)) | ||
3836 | 256 | 266 | ||
3837 | 257 | def test_cast_to_rack_controller(self): | 267 | def test_cast_to_rack_controller(self): |
3847 | 258 | node = factory.make_Node(node_type=NODE_TYPE.RACK_CONTROLLER) | 268 | node = factory.make_Node().as_node() |
3848 | 259 | rack = typecast_to_node_type(node) | 269 | rack = node.as_rack_controller() |
3849 | 260 | self.assertIsInstance(rack, RackController) | 270 | self.assertThat(node, HasType(Node)) |
3850 | 261 | 271 | self.assertThat(rack, HasType(RackController)) | |
3851 | 262 | def test_cast_to_region_and_rack_controller(self): | 272 | self.assertThat(rack, SharesStorageWith(node)) |
3843 | 263 | node = factory.make_Node( | ||
3844 | 264 | node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER) | ||
3845 | 265 | rack = typecast_to_node_type(node) | ||
3846 | 266 | self.assertIsInstance(rack, RackController) | ||
3852 | 267 | 273 | ||
3853 | 268 | def test_cast_to_region_controller(self): | 274 | def test_cast_to_region_controller(self): |
3857 | 269 | node = factory.make_Node(node_type=NODE_TYPE.REGION_CONTROLLER) | 275 | node = factory.make_Node().as_node() |
3858 | 270 | region = typecast_to_node_type(node) | 276 | region = node.as_region_controller() |
3859 | 271 | self.assertIsInstance(region, RegionController) | 277 | self.assertThat(node, HasType(Node)) |
3860 | 278 | self.assertThat(region, HasType(RegionController)) | ||
3861 | 279 | self.assertThat(region, SharesStorageWith(node)) | ||
3862 | 272 | 280 | ||
3863 | 273 | def test_cast_to_device(self): | 281 | def test_cast_to_device(self): |
3871 | 274 | node = factory.make_Node(node_type=NODE_TYPE.DEVICE) | 282 | node = factory.make_Node().as_node() |
3872 | 275 | device = typecast_to_node_type(node) | 283 | device = node.as_device() |
3873 | 276 | self.assertIsInstance(device, Device) | 284 | self.assertThat(node, HasType(Node)) |
3874 | 277 | 285 | self.assertThat(device, HasType(Device)) | |
3875 | 278 | def test_throws_exception_on_unknown_type(self): | 286 | self.assertThat(device, SharesStorageWith(node)) |
3876 | 279 | node = factory.make_Node(node_type=random.randint(10, 10000)) | 287 | |
3877 | 280 | self.assertRaises(NotImplementedError, typecast_to_node_type, node) | 288 | def test_cast_to_node(self): |
3878 | 289 | machine = factory.make_Machine() | ||
3879 | 290 | node = machine.as_node() | ||
3880 | 291 | self.assertThat(machine, HasType(Machine)) | ||
3881 | 292 | self.assertThat(node, HasType(Node)) | ||
3882 | 293 | self.assertThat(node, SharesStorageWith(machine)) | ||
3883 | 294 | |||
3884 | 295 | def test_cast_to_chassis(self): | ||
3885 | 296 | node = factory.make_Node().as_node() | ||
3886 | 297 | chassis = node.as_chassis() | ||
3887 | 298 | self.assertThat(node, HasType(Node)) | ||
3888 | 299 | self.assertThat(chassis, HasType(Chassis)) | ||
3889 | 300 | self.assertThat(chassis, SharesStorageWith(node)) | ||
3890 | 301 | |||
3891 | 302 | def test_cast_to_storage(self): | ||
3892 | 303 | node = factory.make_Node().as_node() | ||
3893 | 304 | storage = node.as_storage() | ||
3894 | 305 | self.assertThat(node, HasType(Node)) | ||
3895 | 306 | self.assertThat(storage, HasType(Storage)) | ||
3896 | 307 | self.assertThat(storage, SharesStorageWith(node)) | ||
3897 | 281 | 308 | ||
3898 | 282 | 309 | ||
3899 | 283 | class TestNodeManager(MAASServerTestCase): | 310 | class TestNodeManager(MAASServerTestCase): |
3900 | @@ -1229,12 +1256,11 @@ | |||
3901 | 1229 | sentinel.power_parameters), node.get_effective_power_info()) | 1256 | sentinel.power_parameters), node.get_effective_power_info()) |
3902 | 1230 | 1257 | ||
3903 | 1231 | def test_get_effective_power_info_cant_be_queried(self): | 1258 | def test_get_effective_power_info_cant_be_queried(self): |
3910 | 1232 | all_power_types = { | 1259 | uncontrolled_power_types = [ |
3911 | 1233 | power_type_details['name'] | 1260 | driver.name |
3912 | 1234 | for power_type_details in JSON_POWER_TYPE_PARAMETERS | 1261 | for _, driver in PowerDriverRegistry |
3913 | 1235 | } | 1262 | if not driver.queryable |
3914 | 1236 | uncontrolled_power_types = all_power_types.difference( | 1263 | ] |
3909 | 1237 | QUERY_POWER_TYPES) | ||
3915 | 1238 | for power_type in uncontrolled_power_types: | 1264 | for power_type in uncontrolled_power_types: |
3916 | 1239 | node = factory.make_Node(power_type=power_type) | 1265 | node = factory.make_Node(power_type=power_type) |
3917 | 1240 | gepp = self.patch(node, "get_effective_power_parameters") | 1266 | gepp = self.patch(node, "get_effective_power_parameters") |
3918 | @@ -1245,7 +1271,11 @@ | |||
3919 | 1245 | node.get_effective_power_info()) | 1271 | node.get_effective_power_info()) |
3920 | 1246 | 1272 | ||
3921 | 1247 | def test_get_effective_power_info_can_be_queried(self): | 1273 | def test_get_effective_power_info_can_be_queried(self): |
3923 | 1248 | power_type = random.choice(QUERY_POWER_TYPES) | 1274 | power_type = random.choice([ |
3924 | 1275 | driver.name | ||
3925 | 1276 | for _, driver in PowerDriverRegistry | ||
3926 | 1277 | if driver.queryable | ||
3927 | 1278 | ]) | ||
3928 | 1249 | node = factory.make_Node(power_type=power_type) | 1279 | node = factory.make_Node(power_type=power_type) |
3929 | 1250 | gepp = self.patch(node, "get_effective_power_parameters") | 1280 | gepp = self.patch(node, "get_effective_power_parameters") |
3930 | 1251 | self.assertEqual( | 1281 | self.assertEqual( |
3931 | @@ -1499,7 +1529,8 @@ | |||
3932 | 1499 | node_start = self.patch(node, '_start') | 1529 | node_start = self.patch(node, '_start') |
3933 | 1500 | # Return a post-commit hook from Node.start(). | 1530 | # Return a post-commit hook from Node.start(). |
3934 | 1501 | node_start.side_effect = ( | 1531 | node_start.side_effect = ( |
3936 | 1502 | lambda user, user_data, old_status: post_commit()) | 1532 | lambda user, user_data, old_status, allow_power_cycle: ( |
3937 | 1533 | post_commit())) | ||
3938 | 1503 | Config.objects.set_config('disk_erase_with_secure_erase', True) | 1534 | Config.objects.set_config('disk_erase_with_secure_erase', True) |
3939 | 1504 | Config.objects.set_config('disk_erase_with_quick_erase', True) | 1535 | Config.objects.set_config('disk_erase_with_quick_erase', True) |
3940 | 1505 | with post_commit_hooks: | 1536 | with post_commit_hooks: |
3941 | @@ -1521,7 +1552,8 @@ | |||
3942 | 1521 | node_start = self.patch(node, '_start') | 1552 | node_start = self.patch(node, '_start') |
3943 | 1522 | # Return a post-commit hook from Node.start(). | 1553 | # Return a post-commit hook from Node.start(). |
3944 | 1523 | node_start.side_effect = ( | 1554 | node_start.side_effect = ( |
3946 | 1524 | lambda user, user_data, old_status: post_commit()) | 1555 | lambda user, user_data, old_status, allow_power_cycle: ( |
3947 | 1556 | post_commit())) | ||
3948 | 1525 | Config.objects.set_config('disk_erase_with_secure_erase', False) | 1557 | Config.objects.set_config('disk_erase_with_secure_erase', False) |
3949 | 1526 | Config.objects.set_config('disk_erase_with_quick_erase', False) | 1558 | Config.objects.set_config('disk_erase_with_quick_erase', False) |
3950 | 1527 | with post_commit_hooks: | 1559 | with post_commit_hooks: |
3951 | @@ -1543,14 +1575,17 @@ | |||
3952 | 1543 | node_start = self.patch(node, '_start') | 1575 | node_start = self.patch(node, '_start') |
3953 | 1544 | # Return a post-commit hook from Node.start(). | 1576 | # Return a post-commit hook from Node.start(). |
3954 | 1545 | node_start.side_effect = ( | 1577 | node_start.side_effect = ( |
3956 | 1546 | lambda user, user_data, old_status: post_commit()) | 1578 | lambda user, user_data, old_status, allow_power_cycle: ( |
3957 | 1579 | post_commit())) | ||
3958 | 1547 | with post_commit_hooks: | 1580 | with post_commit_hooks: |
3959 | 1548 | node.start_disk_erasing(owner) | 1581 | node.start_disk_erasing(owner) |
3960 | 1549 | self.expectThat(node.owner, Equals(owner)) | 1582 | self.expectThat(node.owner, Equals(owner)) |
3961 | 1550 | self.expectThat(node.status, Equals(NODE_STATUS.DISK_ERASING)) | 1583 | self.expectThat(node.status, Equals(NODE_STATUS.DISK_ERASING)) |
3962 | 1551 | self.expectThat(node.agent_name, Equals(agent_name)) | 1584 | self.expectThat(node.agent_name, Equals(agent_name)) |
3963 | 1552 | self.assertThat( | 1585 | self.assertThat( |
3965 | 1553 | node_start, MockCalledOnceWith(owner, ANY, NODE_STATUS.ALLOCATED)) | 1586 | node_start, |
3966 | 1587 | MockCalledOnceWith( | ||
3967 | 1588 | owner, ANY, NODE_STATUS.ALLOCATED, allow_power_cycle=True)) | ||
3968 | 1554 | 1589 | ||
3969 | 1555 | def test_start_disk_erasing_logs_user_request(self): | 1590 | def test_start_disk_erasing_logs_user_request(self): |
3970 | 1556 | owner = factory.make_User() | 1591 | owner = factory.make_User() |
3971 | @@ -1558,12 +1593,15 @@ | |||
3972 | 1558 | node_start = self.patch(node, '_start') | 1593 | node_start = self.patch(node, '_start') |
3973 | 1559 | # Return a post-commit hook from Node.start(). | 1594 | # Return a post-commit hook from Node.start(). |
3974 | 1560 | node_start.side_effect = ( | 1595 | node_start.side_effect = ( |
3976 | 1561 | lambda user, user_data, old_status: post_commit()) | 1596 | lambda user, user_data, old_status, allow_power_cycle: ( |
3977 | 1597 | post_commit())) | ||
3978 | 1562 | register_event = self.patch(node, '_register_request_event') | 1598 | register_event = self.patch(node, '_register_request_event') |
3979 | 1563 | with post_commit_hooks: | 1599 | with post_commit_hooks: |
3980 | 1564 | node.start_disk_erasing(owner) | 1600 | node.start_disk_erasing(owner) |
3981 | 1565 | self.assertThat( | 1601 | self.assertThat( |
3983 | 1566 | node_start, MockCalledOnceWith(owner, ANY, NODE_STATUS.ALLOCATED)) | 1602 | node_start, |
3984 | 1603 | MockCalledOnceWith( | ||
3985 | 1604 | owner, ANY, NODE_STATUS.ALLOCATED, allow_power_cycle=True)) | ||
3986 | 1567 | self.assertThat(register_event, MockCalledOnceWith( | 1605 | self.assertThat(register_event, MockCalledOnceWith( |
3987 | 1568 | owner, EVENT_TYPES.REQUEST_NODE_ERASE_DISK, | 1606 | owner, EVENT_TYPES.REQUEST_NODE_ERASE_DISK, |
3988 | 1569 | action='start disk erasing', comment=None)) | 1607 | action='start disk erasing', comment=None)) |
3989 | @@ -1626,7 +1664,8 @@ | |||
3990 | 1626 | 1664 | ||
3991 | 1627 | self.assertThat( | 1665 | self.assertThat( |
3992 | 1628 | node_start, MockCalledOnceWith( | 1666 | node_start, MockCalledOnceWith( |
3994 | 1629 | admin, generate_user_data.return_value, NODE_STATUS.ALLOCATED)) | 1667 | admin, generate_user_data.return_value, NODE_STATUS.ALLOCATED, |
3995 | 1668 | allow_power_cycle=True)) | ||
3996 | 1630 | self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) | 1669 | self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) |
3997 | 1631 | 1670 | ||
3998 | 1632 | def test_start_disk_erasing_sets_status_on_post_commit_error(self): | 1671 | def test_start_disk_erasing_sets_status_on_post_commit_error(self): |
3999 | @@ -1807,20 +1846,18 @@ | |||
4000 | 1807 | } | 1846 | } |
4001 | 1808 | # Use an "uncontrolled" power type (i.e. a power type for which we | 1847 | # Use an "uncontrolled" power type (i.e. a power type for which we |
4002 | 1809 | # cannot query the status of the node). | 1848 | # cannot query the status of the node). |
4010 | 1810 | all_power_types = { | 1849 | power_type = random.choice([ |
4011 | 1811 | power_type_details['name'] | 1850 | driver.name |
4012 | 1812 | for power_type_details in JSON_POWER_TYPE_PARAMETERS | 1851 | for _, driver in PowerDriverRegistry |
4013 | 1813 | } | 1852 | if not driver.queryable |
4014 | 1814 | uncontrolled_power_types = ( | 1853 | ]) |
4008 | 1815 | all_power_types.difference(QUERY_POWER_TYPES)) | ||
4009 | 1816 | power_type = random.choice(list(uncontrolled_power_types)) | ||
4015 | 1817 | rack = factory.make_RackController() | 1854 | rack = factory.make_RackController() |
4016 | 1818 | node = factory.make_Node_with_Interface_on_Subnet( | 1855 | node = factory.make_Node_with_Interface_on_Subnet( |
4017 | 1819 | status=NODE_STATUS.ALLOCATED, owner=owner, owner_data=owner_data, | 1856 | status=NODE_STATUS.ALLOCATED, owner=owner, owner_data=owner_data, |
4018 | 1820 | agent_name=agent_name, power_type=power_type, primary_rack=rack) | 1857 | agent_name=agent_name, power_type=power_type, primary_rack=rack) |
4019 | 1821 | self.patch(Node, '_set_status_expires') | 1858 | self.patch(Node, '_set_status_expires') |
4020 | 1822 | mock_stop = self.patch(node, "_stop") | 1859 | mock_stop = self.patch(node, "_stop") |
4022 | 1823 | mock_release_to_ready = self.patch(node, "_release_to_ready") | 1860 | mock_finalize_release = self.patch(node, "_finalize_release") |
4023 | 1824 | node.power_state = POWER_STATE.ON | 1861 | node.power_state = POWER_STATE.ON |
4024 | 1825 | node.release() | 1862 | node.release() |
4025 | 1826 | self.expectThat(Node._set_status_expires, MockNotCalled()) | 1863 | self.expectThat(Node._set_status_expires, MockNotCalled()) |
4026 | @@ -1833,7 +1870,7 @@ | |||
4027 | 1833 | self.expectThat(node.distro_series, Equals('')) | 1870 | self.expectThat(node.distro_series, Equals('')) |
4028 | 1834 | self.expectThat(node.license_key, Equals('')) | 1871 | self.expectThat(node.license_key, Equals('')) |
4029 | 1835 | self.expectThat(mock_stop, MockCalledOnceWith(node.owner)) | 1872 | self.expectThat(mock_stop, MockCalledOnceWith(node.owner)) |
4031 | 1836 | self.expectThat(mock_release_to_ready, MockCalledOnceWith()) | 1873 | self.expectThat(mock_finalize_release, MockCalledOnceWith()) |
4032 | 1837 | 1874 | ||
4033 | 1838 | def test_release_node_that_has_power_off(self): | 1875 | def test_release_node_that_has_power_off(self): |
4034 | 1839 | agent_name = factory.make_name('agent-name') | 1876 | agent_name = factory.make_name('agent-name') |
4035 | @@ -1879,6 +1916,16 @@ | |||
4036 | 1879 | [], list(NodeResult.objects.filter( | 1916 | [], list(NodeResult.objects.filter( |
4037 | 1880 | node=node, result_type=RESULT_TYPE.INSTALLATION))) | 1917 | node=node, result_type=RESULT_TYPE.INSTALLATION))) |
4038 | 1881 | 1918 | ||
4039 | 1919 | def test_release_deletes_dynamic_machine(self): | ||
4040 | 1920 | agent_name = factory.make_name('agent-name') | ||
4041 | 1921 | owner = factory.make_User() | ||
4042 | 1922 | node = factory.make_Node( | ||
4043 | 1923 | status=NODE_STATUS.ALLOCATED, owner=owner, agent_name=agent_name, | ||
4044 | 1924 | dynamic=True, power_state=POWER_STATE.OFF) | ||
4045 | 1925 | with post_commit_hooks: | ||
4046 | 1926 | node.release() | ||
4047 | 1927 | self.assertIsNone(reload_object(node)) | ||
4048 | 1928 | |||
4049 | 1882 | def test_dynamic_ip_addresses_from_ip_address_table(self): | 1929 | def test_dynamic_ip_addresses_from_ip_address_table(self): |
4050 | 1883 | node = factory.make_Node() | 1930 | node = factory.make_Node() |
4051 | 1884 | interfaces = [ | 1931 | interfaces = [ |
4052 | @@ -2233,7 +2280,8 @@ | |||
4053 | 2233 | node_start = self.patch(node, '_start') | 2280 | node_start = self.patch(node, '_start') |
4054 | 2234 | # Return a post-commit hook from Node.start(). | 2281 | # Return a post-commit hook from Node.start(). |
4055 | 2235 | node_start.side_effect = ( | 2282 | node_start.side_effect = ( |
4057 | 2236 | lambda user, user_data, old_status: post_commit()) | 2283 | lambda user, user_data, old_status, allow_power_cycle: ( |
4058 | 2284 | post_commit())) | ||
4059 | 2237 | admin = factory.make_admin() | 2285 | admin = factory.make_admin() |
4060 | 2238 | node.start_commissioning(admin) | 2286 | node.start_commissioning(admin) |
4061 | 2239 | post_commit_hooks.reset() # Ignore these for now. | 2287 | post_commit_hooks.reset() # Ignore these for now. |
4062 | @@ -2243,7 +2291,7 @@ | |||
4063 | 2243 | } | 2291 | } |
4064 | 2244 | self.assertAttributes(node, expected_attrs) | 2292 | self.assertAttributes(node, expected_attrs) |
4065 | 2245 | self.assertThat(node_start, MockCalledOnceWith( | 2293 | self.assertThat(node_start, MockCalledOnceWith( |
4067 | 2246 | admin, ANY, NODE_STATUS.NEW)) | 2294 | admin, ANY, NODE_STATUS.NEW, allow_power_cycle=True)) |
4068 | 2247 | 2295 | ||
4069 | 2248 | def test_start_commissioning_sets_options(self): | 2296 | def test_start_commissioning_sets_options(self): |
4070 | 2249 | rack = factory.make_RackController() | 2297 | rack = factory.make_RackController() |
4071 | @@ -2253,7 +2301,8 @@ | |||
4072 | 2253 | node_start = self.patch(node, '_start') | 2301 | node_start = self.patch(node, '_start') |
4073 | 2254 | # Return a post-commit hook from Node.start(). | 2302 | # Return a post-commit hook from Node.start(). |
4074 | 2255 | node_start.side_effect = ( | 2303 | node_start.side_effect = ( |
4076 | 2256 | lambda user, user_data, old_status: post_commit()) | 2304 | lambda user, user_data, old_status, allow_power_cycle: ( |
4077 | 2305 | post_commit())) | ||
4078 | 2257 | admin = factory.make_admin() | 2306 | admin = factory.make_admin() |
4079 | 2258 | enable_ssh = factory.pick_bool() | 2307 | enable_ssh = factory.pick_bool() |
4080 | 2259 | skip_networking = factory.pick_bool() | 2308 | skip_networking = factory.pick_bool() |
4081 | @@ -2274,7 +2323,8 @@ | |||
4082 | 2274 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2323 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4083 | 2275 | node_start = self.patch(node, '_start') | 2324 | node_start = self.patch(node, '_start') |
4084 | 2276 | node_start.side_effect = ( | 2325 | node_start.side_effect = ( |
4086 | 2277 | lambda user, user_data, old_status: post_commit()) | 2326 | lambda user, user_data, old_status, allow_power_cycle: ( |
4087 | 2327 | post_commit())) | ||
4088 | 2278 | user_data = factory.make_string().encode('ascii') | 2328 | user_data = factory.make_string().encode('ascii') |
4089 | 2279 | generate_user_data = self.patch( | 2329 | generate_user_data = self.patch( |
4090 | 2280 | commissioning, 'generate_user_data') | 2330 | commissioning, 'generate_user_data') |
4091 | @@ -2283,13 +2333,14 @@ | |||
4092 | 2283 | node.start_commissioning(admin) | 2333 | node.start_commissioning(admin) |
4093 | 2284 | post_commit_hooks.reset() # Ignore these for now. | 2334 | post_commit_hooks.reset() # Ignore these for now. |
4094 | 2285 | self.assertThat(node_start, MockCalledOnceWith( | 2335 | self.assertThat(node_start, MockCalledOnceWith( |
4096 | 2286 | admin, user_data, NODE_STATUS.NEW)) | 2336 | admin, user_data, NODE_STATUS.NEW, allow_power_cycle=True)) |
4097 | 2287 | 2337 | ||
4098 | 2288 | def test_start_commissioning_sets_min_hwe_kernel(self): | 2338 | def test_start_commissioning_sets_min_hwe_kernel(self): |
4099 | 2289 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2339 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4100 | 2290 | node_start = self.patch(node, '_start') | 2340 | node_start = self.patch(node, '_start') |
4101 | 2291 | node_start.side_effect = ( | 2341 | node_start.side_effect = ( |
4103 | 2292 | lambda user, user_data, old_status: post_commit()) | 2342 | lambda user, user_data, old_status, allow_power_cycle: ( |
4104 | 2343 | post_commit())) | ||
4105 | 2293 | user_data = factory.make_string().encode('ascii') | 2344 | user_data = factory.make_string().encode('ascii') |
4106 | 2294 | generate_user_data = self.patch( | 2345 | generate_user_data = self.patch( |
4107 | 2295 | commissioning, 'generate_user_data') | 2346 | commissioning, 'generate_user_data') |
4108 | @@ -2300,11 +2351,34 @@ | |||
4109 | 2300 | post_commit_hooks.reset() # Ignore these for now. | 2351 | post_commit_hooks.reset() # Ignore these for now. |
4110 | 2301 | self.assertEqual('hwe-v', node.min_hwe_kernel) | 2352 | self.assertEqual('hwe-v', node.min_hwe_kernel) |
4111 | 2302 | 2353 | ||
4112 | 2354 | def test_start_commissioning_starts_node_if_already_on(self): | ||
4113 | 2355 | node = factory.make_Node( | ||
4114 | 2356 | interface=True, status=NODE_STATUS.NEW, power_type='manual', | ||
4115 | 2357 | power_state=POWER_STATE.ON) | ||
4116 | 2358 | node_start = self.patch(node, '_start') | ||
4117 | 2359 | # Return a post-commit hook from Node.start(). | ||
4118 | 2360 | node_start.side_effect = ( | ||
4119 | 2361 | lambda user, user_data, old_status, allow_power_cycle: ( | ||
4120 | 2362 | post_commit())) | ||
4121 | 2363 | admin = factory.make_admin() | ||
4122 | 2364 | node.start_commissioning(admin) | ||
4123 | 2365 | post_commit_hooks.reset() # Ignore these for now. | ||
4124 | 2366 | node = reload_object(node) | ||
4125 | 2367 | expected_attrs = { | ||
4126 | 2368 | 'status': NODE_STATUS.COMMISSIONING, | ||
4127 | 2369 | 'owner': admin, | ||
4128 | 2370 | } | ||
4129 | 2371 | self.assertAttributes(node, expected_attrs) | ||
4130 | 2372 | self.expectThat(node.owner, Equals(admin)) | ||
4131 | 2373 | self.assertThat(node_start, MockCalledOnceWith( | ||
4132 | 2374 | admin, ANY, NODE_STATUS.NEW, allow_power_cycle=True)) | ||
4133 | 2375 | |||
4134 | 2303 | def test_start_commissioning_clears_node_commissioning_results(self): | 2376 | def test_start_commissioning_clears_node_commissioning_results(self): |
4135 | 2304 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2377 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4136 | 2305 | node_start = self.patch(node, '_start') | 2378 | node_start = self.patch(node, '_start') |
4137 | 2306 | node_start.side_effect = ( | 2379 | node_start.side_effect = ( |
4139 | 2307 | lambda user, user_data, old_status: post_commit()) | 2380 | lambda user, user_data, old_status, allow_power_cycle: ( |
4140 | 2381 | post_commit())) | ||
4141 | 2308 | NodeResult.objects.store_data( | 2382 | NodeResult.objects.store_data( |
4142 | 2309 | node, factory.make_string(), | 2383 | node, factory.make_string(), |
4143 | 2310 | random.randint(0, 10), | 2384 | random.randint(0, 10), |
4144 | @@ -2318,7 +2392,8 @@ | |||
4145 | 2318 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2392 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4146 | 2319 | node_start = self.patch(node, '_start') | 2393 | node_start = self.patch(node, '_start') |
4147 | 2320 | node_start.side_effect = ( | 2394 | node_start.side_effect = ( |
4149 | 2321 | lambda user, user_data, old_status: post_commit()) | 2395 | lambda user, user_data, old_status, allow_power_cycle: ( |
4150 | 2396 | post_commit())) | ||
4151 | 2322 | clear_storage = self.patch_autospec( | 2397 | clear_storage = self.patch_autospec( |
4152 | 2323 | node, '_clear_full_storage_configuration') | 2398 | node, '_clear_full_storage_configuration') |
4153 | 2324 | admin = factory.make_admin() | 2399 | admin = factory.make_admin() |
4154 | @@ -2330,7 +2405,8 @@ | |||
4155 | 2330 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2405 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4156 | 2331 | node_start = self.patch(node, '_start') | 2406 | node_start = self.patch(node, '_start') |
4157 | 2332 | node_start.side_effect = ( | 2407 | node_start.side_effect = ( |
4159 | 2333 | lambda user, user_data, old_status: post_commit()) | 2408 | lambda user, user_data, old_status, allow_power_cycle: ( |
4160 | 2409 | post_commit())) | ||
4161 | 2334 | clear_storage = self.patch_autospec( | 2410 | clear_storage = self.patch_autospec( |
4162 | 2335 | node, '_clear_full_storage_configuration') | 2411 | node, '_clear_full_storage_configuration') |
4163 | 2336 | admin = factory.make_admin() | 2412 | admin = factory.make_admin() |
4164 | @@ -2342,7 +2418,8 @@ | |||
4165 | 2342 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2418 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4166 | 2343 | node_start = self.patch(node, '_start') | 2419 | node_start = self.patch(node, '_start') |
4167 | 2344 | node_start.side_effect = ( | 2420 | node_start.side_effect = ( |
4169 | 2345 | lambda user, user_data, old_status: post_commit()) | 2421 | lambda user, user_data, old_status, allow_power_cycle: ( |
4170 | 2422 | post_commit())) | ||
4171 | 2346 | clear_networking = self.patch_autospec( | 2423 | clear_networking = self.patch_autospec( |
4172 | 2347 | node, '_clear_networking_configuration') | 2424 | node, '_clear_networking_configuration') |
4173 | 2348 | admin = factory.make_admin() | 2425 | admin = factory.make_admin() |
4174 | @@ -2354,7 +2431,8 @@ | |||
4175 | 2354 | node = factory.make_Node(status=NODE_STATUS.NEW) | 2431 | node = factory.make_Node(status=NODE_STATUS.NEW) |
4176 | 2355 | node_start = self.patch(node, '_start') | 2432 | node_start = self.patch(node, '_start') |
4177 | 2356 | node_start.side_effect = ( | 2433 | node_start.side_effect = ( |
4179 | 2357 | lambda user, user_data, old_status: post_commit()) | 2434 | lambda user, user_data, old_status, allow_power_cycle: ( |
4180 | 2435 | post_commit())) | ||
4181 | 2358 | clear_networking = self.patch_autospec( | 2436 | clear_networking = self.patch_autospec( |
4182 | 2359 | node, '_clear_networking_configuration') | 2437 | node, '_clear_networking_configuration') |
4183 | 2360 | admin = factory.make_admin() | 2438 | admin = factory.make_admin() |
4184 | @@ -2398,7 +2476,8 @@ | |||
4185 | 2398 | self.assertThat( | 2476 | self.assertThat( |
4186 | 2399 | node_start, | 2477 | node_start, |
4187 | 2400 | MockCalledOnceWith( | 2478 | MockCalledOnceWith( |
4189 | 2401 | admin, generate_user_data.return_value, NODE_STATUS.NEW)) | 2479 | admin, generate_user_data.return_value, NODE_STATUS.NEW, |
4190 | 2480 | allow_power_cycle=True)) | ||
4191 | 2402 | self.assertEqual(NODE_STATUS.NEW, node.status) | 2481 | self.assertEqual(NODE_STATUS.NEW, node.status) |
4192 | 2403 | 2482 | ||
4193 | 2404 | def test_start_commissioning_logs_and_raises_errors_in_starting(self): | 2483 | def test_start_commissioning_logs_and_raises_errors_in_starting(self): |
4194 | @@ -2422,7 +2501,8 @@ | |||
4195 | 2422 | node_start = self.patch(node, '_start') | 2501 | node_start = self.patch(node, '_start') |
4196 | 2423 | # Return a post-commit hook from Node.start(). | 2502 | # Return a post-commit hook from Node.start(). |
4197 | 2424 | node_start.side_effect = ( | 2503 | node_start.side_effect = ( |
4199 | 2425 | lambda user, user_data, old_status: post_commit()) | 2504 | lambda user, user_data, old_status, allow_power_cycle: ( |
4200 | 2505 | post_commit())) | ||
4201 | 2426 | admin = factory.make_admin() | 2506 | admin = factory.make_admin() |
4202 | 2427 | node.start_commissioning(admin) | 2507 | node.start_commissioning(admin) |
4203 | 2428 | post_commit_hooks.reset() # Ignore these for now. | 2508 | post_commit_hooks.reset() # Ignore these for now. |
4204 | @@ -2640,11 +2720,10 @@ | |||
4205 | 2640 | 2720 | ||
4206 | 2641 | def test_full_clean_checks_architecture_for_installable_nodes(self): | 2721 | def test_full_clean_checks_architecture_for_installable_nodes(self): |
4207 | 2642 | device = factory.make_Device(architecture='') | 2722 | device = factory.make_Device(architecture='') |
4211 | 2643 | # Set type here so we don't cause exception while creating object | 2723 | device.node_type = factory.pick_enum( |
4209 | 2644 | node = typecast_node(device, Node) | ||
4210 | 2645 | node.node_type = factory.pick_enum( | ||
4212 | 2646 | NODE_TYPE, but_not=[NODE_TYPE.DEVICE]) | 2724 | NODE_TYPE, but_not=[NODE_TYPE.DEVICE]) |
4214 | 2647 | exception = self.assertRaises(ValidationError, node.full_clean) | 2725 | exception = self.assertRaises( |
4215 | 2726 | ValidationError, device.as_node().full_clean) | ||
4216 | 2648 | self.assertEqual( | 2727 | self.assertEqual( |
4217 | 2649 | exception.message_dict, | 2728 | exception.message_dict, |
4218 | 2650 | {'architecture': | 2729 | {'architecture': |
4219 | @@ -2985,9 +3064,7 @@ | |||
4220 | 2985 | INTERFACE_TYPE.PHYSICAL, mac_address='ec:a8:6b:fd:ae:3f', | 3064 | INTERFACE_TYPE.PHYSICAL, mac_address='ec:a8:6b:fd:ae:3f', |
4221 | 2986 | node=node) | 3065 | node=node) |
4222 | 2987 | node.save() | 3066 | node.save() |
4226 | 2988 | self.assertEqual( | 3067 | self.assertThat(node.get_pxe_mac_vendor(), IsNonEmptyString) |
4224 | 2989 | "ELITEGROUP COMPUTER SYSTEMS CO., LTD.", | ||
4225 | 2990 | node.get_pxe_mac_vendor()) | ||
4227 | 2991 | 3068 | ||
4228 | 2992 | def test_get_extra_macs_returns_all_but_boot_interface_mac(self): | 3069 | def test_get_extra_macs_returns_all_but_boot_interface_mac(self): |
4229 | 2993 | node = factory.make_Node() | 3070 | node = factory.make_Node() |
4230 | @@ -4914,10 +4991,12 @@ | |||
4231 | 4914 | register_view("maasserver_discovery") | 4991 | register_view("maasserver_discovery") |
4232 | 4915 | 4992 | ||
4233 | 4916 | def make_acquired_node_with_interface( | 4993 | def make_acquired_node_with_interface( |
4235 | 4917 | self, user, bmc_connected_to=None, power_type="virsh"): | 4994 | self, user, bmc_connected_to=None, power_type="virsh", |
4236 | 4995 | power_state=POWER_STATE.OFF): | ||
4237 | 4918 | node = factory.make_Node_with_Interface_on_Subnet( | 4996 | node = factory.make_Node_with_Interface_on_Subnet( |
4238 | 4919 | status=NODE_STATUS.READY, with_boot_disk=True, | 4997 | status=NODE_STATUS.READY, with_boot_disk=True, |
4240 | 4920 | bmc_connected_to=bmc_connected_to, power_type=power_type) | 4998 | bmc_connected_to=bmc_connected_to, power_type=power_type, |
4241 | 4999 | power_state=power_state) | ||
4242 | 4921 | node.acquire(user) | 5000 | node.acquire(user) |
4243 | 4922 | return node | 5001 | return node |
4244 | 4923 | 5002 | ||
4245 | @@ -5035,6 +5114,24 @@ | |||
4246 | 5035 | node.system_id, status=old_status), | 5114 | node.system_id, status=old_status), |
4247 | 5036 | call(callOutToDatabase, node.release_interface_config))) | 5115 | call(callOutToDatabase, node.release_interface_config))) |
4248 | 5037 | 5116 | ||
4249 | 5117 | def test__calls_power_cycle_when_cycling_allowed(self): | ||
4250 | 5118 | user = factory.make_User() | ||
4251 | 5119 | node = self.make_acquired_node_with_interface( | ||
4252 | 5120 | user, power_state=POWER_STATE.ON) | ||
4253 | 5121 | |||
4254 | 5122 | post_commit_defer = self.patch(node_module, "post_commit") | ||
4255 | 5123 | mock_power_control = self.patch(Node, "_power_control_node") | ||
4256 | 5124 | mock_power_control.return_value = post_commit_defer | ||
4257 | 5125 | |||
4258 | 5126 | # Power cycling is allowed when starting deployment. This node is | ||
4259 | 5127 | # allocated and the power_state is ON. Power cycle should be called | ||
4260 | 5128 | # instead of power_on. | ||
4261 | 5129 | node.start(user) | ||
4262 | 5130 | |||
4263 | 5131 | # Calls _power_control_node when power_cycle. | ||
4264 | 5132 | self.assertThat( | ||
4265 | 5133 | mock_power_control, MockCalledOnceWith(ANY, power_cycle, ANY)) | ||
4266 | 5134 | |||
4267 | 5038 | def test_storage_layout_issues_returns_invalid_no_boot_arm64_non_efi(self): | 5135 | def test_storage_layout_issues_returns_invalid_no_boot_arm64_non_efi(self): |
4268 | 5039 | node = factory.make_Node( | 5136 | node = factory.make_Node( |
4269 | 5040 | architecture="arm64/generic", bios_boot_method="pxe") | 5137 | architecture="arm64/generic", bios_boot_method="pxe") |
4270 | @@ -5640,8 +5737,7 @@ | |||
4271 | 5640 | ) | 5737 | ) |
4272 | 5641 | 5738 | ||
4273 | 5642 | def create_empty_controller(self): | 5739 | def create_empty_controller(self): |
4276 | 5643 | node = factory.make_Node(node_type=self.node_type) | 5740 | return factory.make_Node(node_type=self.node_type).as_self() |
4275 | 5644 | return typecast_to_node_type(node) | ||
4277 | 5645 | 5741 | ||
4278 | 5646 | def test__order_of_calls_to_update_interface_is_always_the_same(self): | 5742 | def test__order_of_calls_to_update_interface_is_always_the_same(self): |
4279 | 5647 | controller = self.create_empty_controller() | 5743 | controller = self.create_empty_controller() |
4280 | @@ -8369,7 +8465,7 @@ | |||
4281 | 8369 | region_and_rack = factory.make_Node( | 8465 | region_and_rack = factory.make_Node( |
4282 | 8370 | node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER) | 8466 | node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER) |
4283 | 8371 | system_id = region_and_rack.system_id | 8467 | system_id = region_and_rack.system_id |
4285 | 8372 | typecast_node(region_and_rack, RackController).delete() | 8468 | region_and_rack.as_rack_controller().delete() |
4286 | 8373 | self.assertEquals( | 8469 | self.assertEquals( |
4287 | 8374 | NODE_TYPE.REGION_CONTROLLER, | 8470 | NODE_TYPE.REGION_CONTROLLER, |
4288 | 8375 | Node.objects.get(system_id=system_id).node_type) | 8471 | Node.objects.get(system_id=system_id).node_type) |
4289 | @@ -8576,7 +8672,7 @@ | |||
4290 | 8576 | def test_delete_converts_region_and_rack_to_rack(self): | 8672 | def test_delete_converts_region_and_rack_to_rack(self): |
4291 | 8577 | region_and_rack = factory.make_Node( | 8673 | region_and_rack = factory.make_Node( |
4292 | 8578 | node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER) | 8674 | node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER) |
4294 | 8579 | typecast_node(region_and_rack, RegionController).delete() | 8675 | region_and_rack.as_region_controller().delete() |
4295 | 8580 | self.assertEquals( | 8676 | self.assertEquals( |
4296 | 8581 | NODE_TYPE.RACK_CONTROLLER, | 8677 | NODE_TYPE.RACK_CONTROLLER, |
4297 | 8582 | Node.objects.get(system_id=region_and_rack.system_id).node_type) | 8678 | Node.objects.get(system_id=region_and_rack.system_id).node_type) |
4298 | @@ -8776,3 +8872,25 @@ | |||
4299 | 8776 | self.assertThat(monitoring_state, Contains('eth2')) | 8872 | self.assertThat(monitoring_state, Contains('eth2')) |
4300 | 8777 | self.assertThat( | 8873 | self.assertThat( |
4301 | 8778 | monitoring_state['eth1'], Equals(eth1.get_discovery_state())) | 8874 | monitoring_state['eth1'], Equals(eth1.get_discovery_state())) |
4302 | 8875 | |||
4303 | 8876 | |||
4304 | 8877 | class TestChassis(MAASServerTestCase): | ||
4305 | 8878 | |||
4306 | 8879 | def test__domain_is_always_empty(self): | ||
4307 | 8880 | hostname = factory.make_hostname() | ||
4308 | 8881 | domain = factory.make_name("domain") | ||
4309 | 8882 | chassis = factory.make_Chassis( | ||
4310 | 8883 | hostname="%s.%s" % (hostname, domain)) | ||
4311 | 8884 | self.assertEquals(hostname, chassis.hostname) | ||
4312 | 8885 | self.assertIsNone(chassis.domain) | ||
4313 | 8886 | |||
4314 | 8887 | |||
4315 | 8888 | class TestStorage(MAASServerTestCase): | ||
4316 | 8889 | |||
4317 | 8890 | def test__domain_is_always_empty(self): | ||
4318 | 8891 | hostname = factory.make_hostname() | ||
4319 | 8892 | domain = factory.make_name("domain") | ||
4320 | 8893 | storage = factory.make_Storage( | ||
4321 | 8894 | hostname="%s.%s" % (hostname, domain)) | ||
4322 | 8895 | self.assertEquals(hostname, storage.hostname) | ||
4323 | 8896 | self.assertIsNone(storage.domain) | ||
4324 | 8779 | 8897 | ||
4325 | === modified file 'src/maasserver/models/tests/test_staticipaddress.py' | |||
4326 | --- src/maasserver/models/tests/test_staticipaddress.py 2016-12-07 15:03:00 +0000 | |||
4327 | +++ src/maasserver/models/tests/test_staticipaddress.py 2016-12-07 15:50:52 +0000 | |||
4328 | @@ -9,11 +9,12 @@ | |||
4329 | 9 | randint, | 9 | randint, |
4330 | 10 | shuffle, | 10 | shuffle, |
4331 | 11 | ) | 11 | ) |
4332 | 12 | import threading | ||
4333 | 12 | from unittest import skip | 13 | from unittest import skip |
4334 | 13 | from unittest.mock import sentinel | 14 | from unittest.mock import sentinel |
4335 | 14 | 15 | ||
4336 | 15 | from django.core.exceptions import ValidationError | 16 | from django.core.exceptions import ValidationError |
4338 | 16 | from django.db import transaction | 17 | from django.db import IntegrityError |
4339 | 17 | from maasserver import locks | 18 | from maasserver import locks |
4340 | 18 | from maasserver.dbviews import register_view | 19 | from maasserver.dbviews import register_view |
4341 | 19 | from maasserver.enum import ( | 20 | from maasserver.enum import ( |
4342 | @@ -41,29 +42,39 @@ | |||
4343 | 41 | MAASServerTestCase, | 42 | MAASServerTestCase, |
4344 | 42 | MAASTransactionServerTestCase, | 43 | MAASTransactionServerTestCase, |
4345 | 43 | ) | 44 | ) |
4347 | 44 | from maasserver.utils.dns import get_ip_based_hostname | 45 | <<<<<<< TREE |
4348 | 46 | from maasserver.utils.dns import get_ip_based_hostname | ||
4349 | 47 | ======= | ||
4350 | 48 | from maasserver.utils import orm | ||
4351 | 49 | from maasserver.utils.dns import get_ip_based_hostname | ||
4352 | 50 | >>>>>>> MERGE-SOURCE | ||
4353 | 45 | from maasserver.utils.orm import ( | 51 | from maasserver.utils.orm import ( |
4354 | 46 | reload_object, | 52 | reload_object, |
4355 | 47 | RetryTransaction, | ||
4356 | 48 | transactional, | 53 | transactional, |
4357 | 49 | ) | 54 | ) |
4358 | 50 | from maasserver.websockets.base import dehydrate_datetime | 55 | from maasserver.websockets.base import dehydrate_datetime |
4359 | 51 | from maastesting.matchers import ( | ||
4360 | 52 | MockCalledOnceWith, | ||
4361 | 53 | MockNotCalled, | ||
4362 | 54 | ) | ||
4363 | 55 | from netaddr import IPAddress | 56 | from netaddr import IPAddress |
4364 | 57 | from psycopg2.errorcodes import FOREIGN_KEY_VIOLATION | ||
4365 | 56 | from testtools import ExpectedException | 58 | from testtools import ExpectedException |
4366 | 57 | from testtools.matchers import ( | 59 | from testtools.matchers import ( |
4367 | 60 | AfterPreprocessing, | ||
4368 | 61 | AllMatch, | ||
4369 | 58 | Contains, | 62 | Contains, |
4370 | 59 | Equals, | 63 | Equals, |
4371 | 60 | HasLength, | 64 | HasLength, |
4372 | 65 | Is, | ||
4373 | 66 | IsInstance, | ||
4374 | 61 | Not, | 67 | Not, |
4375 | 62 | ) | 68 | ) |
4376 | 69 | from twisted.python.failure import Failure | ||
4377 | 63 | 70 | ||
4378 | 64 | 71 | ||
4379 | 65 | class TestStaticIPAddressManager(MAASServerTestCase): | 72 | class TestStaticIPAddressManager(MAASServerTestCase): |
4380 | 66 | 73 | ||
4381 | 74 | def setUp(self): | ||
4382 | 75 | super(TestStaticIPAddressManager, self).setUp() | ||
4383 | 76 | register_view("maasserver_discovery") | ||
4384 | 77 | |||
4385 | 67 | def test_filter_by_ip_family_ipv4(self): | 78 | def test_filter_by_ip_family_ipv4(self): |
4386 | 68 | network_v4 = factory.make_ipv4_network() | 79 | network_v4 = factory.make_ipv4_network() |
4387 | 69 | subnet_v4 = factory.make_Subnet(cidr=str(network_v4.cidr)) | 80 | subnet_v4 = factory.make_Subnet(cidr=str(network_v4.cidr)) |
4388 | @@ -134,36 +145,21 @@ | |||
4389 | 134 | StaticIPAddress.objects.filter_by_subnet_cidr_family( | 145 | StaticIPAddress.objects.filter_by_subnet_cidr_family( |
4390 | 135 | IPADDRESS_FAMILY.IPv6)) | 146 | IPADDRESS_FAMILY.IPv6)) |
4391 | 136 | 147 | ||
4392 | 137 | |||
4393 | 138 | class TestStaticIPAddressManagerTransactional(MAASTransactionServerTestCase): | ||
4394 | 139 | """The following TestStaticIPAddressManager tests require | ||
4395 | 140 | MAASTransactionServerTestCase, and thus have been separated from the | ||
4396 | 141 | TestStaticIPAddressManager above. | ||
4397 | 142 | """ | ||
4398 | 143 | |||
4399 | 144 | def setUp(self): | ||
4400 | 145 | register_view("maasserver_discovery") | ||
4401 | 146 | return super().setUp() | ||
4402 | 147 | |||
4403 | 148 | def test_allocate_new_returns_ip_in_correct_range(self): | 148 | def test_allocate_new_returns_ip_in_correct_range(self): |
4408 | 149 | with transaction.atomic(): | 149 | subnet = factory.make_managed_Subnet() |
4409 | 150 | subnet = factory.make_managed_Subnet() | 150 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) |
4406 | 151 | with transaction.atomic(): | ||
4407 | 152 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) | ||
4410 | 153 | self.assertIsInstance(ipaddress, StaticIPAddress) | 151 | self.assertIsInstance(ipaddress, StaticIPAddress) |
4411 | 154 | self.assertTrue( | 152 | self.assertTrue( |
4412 | 155 | subnet.is_valid_static_ip(ipaddress.ip), | 153 | subnet.is_valid_static_ip(ipaddress.ip), |
4413 | 156 | "%s: not valid for subnet with reserved IPs: %r" % ( | 154 | "%s: not valid for subnet with reserved IPs: %r" % ( |
4414 | 157 | ipaddress.ip, subnet.get_ipranges_in_use())) | 155 | ipaddress.ip, subnet.get_ipranges_in_use())) |
4415 | 158 | 156 | ||
4416 | 159 | @transactional | ||
4417 | 160 | def test_allocate_new_allocates_IPv6_address(self): | 157 | def test_allocate_new_allocates_IPv6_address(self): |
4419 | 161 | subnet = factory.make_managed_ipv6_Subnet() | 158 | subnet = factory.make_managed_Subnet(ipv6=True) |
4420 | 162 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) | 159 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) |
4421 | 163 | self.assertIsInstance(ipaddress, StaticIPAddress) | 160 | self.assertIsInstance(ipaddress, StaticIPAddress) |
4422 | 164 | self.assertTrue(subnet.is_valid_static_ip(ipaddress.ip)) | 161 | self.assertTrue(subnet.is_valid_static_ip(ipaddress.ip)) |
4423 | 165 | 162 | ||
4424 | 166 | @transactional | ||
4425 | 167 | def test_allocate_new_sets_user(self): | 163 | def test_allocate_new_sets_user(self): |
4426 | 168 | subnet = factory.make_managed_Subnet() | 164 | subnet = factory.make_managed_Subnet() |
4427 | 169 | user = factory.make_User() | 165 | user = factory.make_User() |
4428 | @@ -171,7 +167,6 @@ | |||
4429 | 171 | subnet=subnet, alloc_type=IPADDRESS_TYPE.USER_RESERVED, user=user) | 167 | subnet=subnet, alloc_type=IPADDRESS_TYPE.USER_RESERVED, user=user) |
4430 | 172 | self.assertEqual(user, ipaddress.user) | 168 | self.assertEqual(user, ipaddress.user) |
4431 | 173 | 169 | ||
4432 | 174 | @transactional | ||
4433 | 175 | def test_allocate_new_with_user_disallows_wrong_alloc_types(self): | 170 | def test_allocate_new_with_user_disallows_wrong_alloc_types(self): |
4434 | 176 | subnet = factory.make_managed_Subnet() | 171 | subnet = factory.make_managed_Subnet() |
4435 | 177 | user = factory.make_User() | 172 | user = factory.make_User() |
4436 | @@ -185,7 +180,6 @@ | |||
4437 | 185 | StaticIPAddress.objects.allocate_new( | 180 | StaticIPAddress.objects.allocate_new( |
4438 | 186 | subnet, user=user, alloc_type=alloc_type) | 181 | subnet, user=user, alloc_type=alloc_type) |
4439 | 187 | 182 | ||
4440 | 188 | @transactional | ||
4441 | 189 | def test_allocate_new_with_reserved_type_requires_a_user(self): | 183 | def test_allocate_new_with_reserved_type_requires_a_user(self): |
4442 | 190 | subnet = factory.make_managed_Subnet() | 184 | subnet = factory.make_managed_Subnet() |
4443 | 191 | with ExpectedException(AssertionError): | 185 | with ExpectedException(AssertionError): |
4444 | @@ -196,18 +190,15 @@ | |||
4445 | 196 | # Django has a bug that casts IP addresses with HOST(), which | 190 | # Django has a bug that casts IP addresses with HOST(), which |
4446 | 197 | # results in alphabetical comparisons of strings instead of IP | 191 | # results in alphabetical comparisons of strings instead of IP |
4447 | 198 | # addresses. See https://bugs.launchpad.net/maas/+bug/1338452 | 192 | # addresses. See https://bugs.launchpad.net/maas/+bug/1338452 |
4458 | 199 | with transaction.atomic(): | 193 | subnet = factory.make_Subnet( |
4459 | 200 | subnet = factory.make_Subnet( | 194 | cidr='10.0.0.0/24', gateway_ip='10.0.0.1') |
4460 | 201 | cidr='10.0.0.0/24', gateway_ip='10.0.0.1') | 195 | factory.make_IPRange(subnet, '10.0.0.2', '10.0.0.97') |
4461 | 202 | factory.make_IPRange(subnet, '10.0.0.2', '10.0.0.97') | 196 | factory.make_IPRange(subnet, '10.0.0.101', '10.0.0.254') |
4462 | 203 | factory.make_IPRange(subnet, '10.0.0.101', '10.0.0.254') | 197 | factory.make_StaticIPAddress("10.0.0.99", subnet=subnet) |
4463 | 204 | factory.make_StaticIPAddress("10.0.0.99", subnet=subnet) | 198 | subnet = reload_object(subnet) |
4464 | 205 | subnet = reload_object(subnet) | 199 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) |
4465 | 206 | with transaction.atomic(): | 200 | self.assertEqual(ipaddress.ip, "10.0.0.98") |
4456 | 207 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) | ||
4457 | 208 | self.assertEqual(ipaddress.ip, "10.0.0.98") | ||
4466 | 209 | 201 | ||
4467 | 210 | @transactional | ||
4468 | 211 | def test_allocate_new_returns_requested_IP_if_available(self): | 202 | def test_allocate_new_returns_requested_IP_if_available(self): |
4469 | 212 | subnet = factory.make_Subnet(cidr='10.0.0.0/24') | 203 | subnet = factory.make_Subnet(cidr='10.0.0.0/24') |
4470 | 213 | ipaddress = StaticIPAddress.objects.allocate_new( | 204 | ipaddress = StaticIPAddress.objects.allocate_new( |
4471 | @@ -220,7 +211,6 @@ | |||
4472 | 220 | requested_address='10.0.0.1') | 211 | requested_address='10.0.0.1') |
4473 | 221 | self.assertEqual('10.0.0.1', ipaddress.ip) | 212 | self.assertEqual('10.0.0.1', ipaddress.ip) |
4474 | 222 | 213 | ||
4475 | 223 | @transactional | ||
4476 | 224 | def test_allocate_new_raises_when_requested_IP_unavailable(self): | 214 | def test_allocate_new_raises_when_requested_IP_unavailable(self): |
4477 | 225 | subnet = factory.make_ipv4_Subnet_with_IPRanges() | 215 | subnet = factory.make_ipv4_Subnet_with_IPRanges() |
4478 | 226 | requested_address = StaticIPAddress.objects.allocate_new( | 216 | requested_address = StaticIPAddress.objects.allocate_new( |
4479 | @@ -235,28 +225,6 @@ | |||
4480 | 235 | StaticIPAddress.objects.allocate_new( | 225 | StaticIPAddress.objects.allocate_new( |
4481 | 236 | subnet, requested_address=requested_address) | 226 | subnet, requested_address=requested_address) |
4482 | 237 | 227 | ||
4483 | 238 | @transactional | ||
4484 | 239 | def test_allocate_new_requests_transaction_retry_if_ip_taken(self): | ||
4485 | 240 | subnet = factory.make_ipv4_Subnet_with_IPRanges() | ||
4486 | 241 | # Simulate a "IP already taken" error. | ||
4487 | 242 | mock_attempt_allocation = self.patch( | ||
4488 | 243 | StaticIPAddress.objects, '_attempt_allocation') | ||
4489 | 244 | mock_attempt_allocation.side_effect = StaticIPAddressUnavailable() | ||
4490 | 245 | self.assertRaises( | ||
4491 | 246 | RetryTransaction, StaticIPAddress.objects.allocate_new, subnet) | ||
4492 | 247 | |||
4493 | 248 | @transactional | ||
4494 | 249 | def test_allocate_new_does_not_use_lock_for_requested_ip(self): | ||
4495 | 250 | # When requesting a specific IP address, there's no need to | ||
4496 | 251 | # acquire the lock. | ||
4497 | 252 | lock = self.patch(locks, 'staticip_acquire') | ||
4498 | 253 | subnet = factory.make_Subnet(cidr='10.0.0.0/24') | ||
4499 | 254 | ipaddress = StaticIPAddress.objects.allocate_new( | ||
4500 | 255 | subnet, requested_address='10.0.0.1') | ||
4501 | 256 | self.assertIsInstance(ipaddress, StaticIPAddress) | ||
4502 | 257 | self.assertThat(lock.__enter__, MockNotCalled()) | ||
4503 | 258 | |||
4504 | 259 | @transactional | ||
4505 | 260 | def test_allocate_new_raises_when_requested_IP_out_of_network(self): | 228 | def test_allocate_new_raises_when_requested_IP_out_of_network(self): |
4506 | 261 | subnet = factory.make_Subnet(cidr='10.0.0.0/24') | 229 | subnet = factory.make_Subnet(cidr='10.0.0.0/24') |
4507 | 262 | requested_address = '10.0.1.1' | 230 | requested_address = '10.0.1.1' |
4508 | @@ -275,31 +243,28 @@ | |||
4509 | 275 | str(e)) | 243 | str(e)) |
4510 | 276 | 244 | ||
4511 | 277 | def test_allocate_new_raises_when_requested_IP_in_dynamic_range(self): | 245 | def test_allocate_new_raises_when_requested_IP_in_dynamic_range(self): |
4535 | 278 | with transaction.atomic(): | 246 | subnet = factory.make_ipv4_Subnet_with_IPRanges() |
4536 | 279 | subnet = factory.make_ipv4_Subnet_with_IPRanges() | 247 | dynamic_range = subnet.get_dynamic_ranges().first() |
4537 | 280 | dynamic_range = subnet.get_dynamic_ranges().first() | 248 | requested_address = str(IPAddress( |
4538 | 281 | requested_address = str(IPAddress( | 249 | dynamic_range.netaddr_iprange.first)) |
4539 | 282 | dynamic_range.netaddr_iprange.first)) | 250 | dynamic_range_end = str(IPAddress( |
4540 | 283 | dynamic_range_end = str(IPAddress( | 251 | dynamic_range.netaddr_iprange.last)) |
4541 | 284 | dynamic_range.netaddr_iprange.last)) | 252 | subnet = reload_object(subnet) |
4542 | 285 | subnet = reload_object(subnet) | 253 | e = self.assertRaises( |
4543 | 286 | with transaction.atomic(): | 254 | StaticIPAddressUnavailable, |
4544 | 287 | e = self.assertRaises( | 255 | StaticIPAddress.objects.allocate_new, |
4545 | 288 | StaticIPAddressUnavailable, | 256 | subnet, factory.pick_enum( |
4546 | 289 | StaticIPAddress.objects.allocate_new, | 257 | IPADDRESS_TYPE, but_not=[ |
4547 | 290 | subnet, factory.pick_enum( | 258 | IPADDRESS_TYPE.DHCP, |
4548 | 291 | IPADDRESS_TYPE, but_not=[ | 259 | IPADDRESS_TYPE.DISCOVERED, |
4549 | 292 | IPADDRESS_TYPE.DHCP, | 260 | IPADDRESS_TYPE.USER_RESERVED, |
4550 | 293 | IPADDRESS_TYPE.DISCOVERED, | 261 | ]), |
4551 | 294 | IPADDRESS_TYPE.USER_RESERVED, | 262 | requested_address=requested_address) |
4552 | 295 | ]), | 263 | self.assertEqual( |
4553 | 296 | requested_address=requested_address) | 264 | "%s is within the dynamic range from %s to %s" % ( |
4554 | 297 | self.assertEqual( | 265 | requested_address, requested_address, dynamic_range_end), |
4555 | 298 | "%s is within the dynamic range from %s to %s" % ( | 266 | str(e)) |
4533 | 299 | requested_address, requested_address, dynamic_range_end), | ||
4534 | 300 | str(e)) | ||
4556 | 301 | 267 | ||
4557 | 302 | @transactional | ||
4558 | 303 | def test_allocate_new_raises_when_alloc_type_is_None(self): | 268 | def test_allocate_new_raises_when_alloc_type_is_None(self): |
4559 | 304 | error = self.assertRaises( | 269 | error = self.assertRaises( |
4560 | 305 | ValueError, StaticIPAddress.objects.allocate_new, | 270 | ValueError, StaticIPAddress.objects.allocate_new, |
4561 | @@ -308,7 +273,6 @@ | |||
4562 | 308 | "IP address type None is not allowed to use allocate_new.", | 273 | "IP address type None is not allowed to use allocate_new.", |
4563 | 309 | str(error)) | 274 | str(error)) |
4564 | 310 | 275 | ||
4565 | 311 | @transactional | ||
4566 | 312 | def test_allocate_new_raises_when_alloc_type_is_not_allowed(self): | 276 | def test_allocate_new_raises_when_alloc_type_is_not_allowed(self): |
4567 | 313 | error = self.assertRaises( | 277 | error = self.assertRaises( |
4568 | 314 | ValueError, StaticIPAddress.objects.allocate_new, | 278 | ValueError, StaticIPAddress.objects.allocate_new, |
4569 | @@ -317,32 +281,98 @@ | |||
4570 | 317 | "IP address type 5 is not allowed to use allocate_new.", | 281 | "IP address type 5 is not allowed to use allocate_new.", |
4571 | 318 | str(error)) | 282 | str(error)) |
4572 | 319 | 283 | ||
4573 | 320 | @transactional | ||
4574 | 321 | def test_allocate_new_uses_staticip_acquire_lock(self): | ||
4575 | 322 | lock = self.patch(locks, 'staticip_acquire') | ||
4576 | 323 | subnet = factory.make_ipv4_Subnet_with_IPRanges() | ||
4577 | 324 | ipaddress = StaticIPAddress.objects.allocate_new(subnet) | ||
4578 | 325 | self.assertIsInstance(ipaddress, StaticIPAddress) | ||
4579 | 326 | self.assertThat(lock.__enter__, MockCalledOnceWith()) | ||
4580 | 327 | self.assertThat( | ||
4581 | 328 | lock.__exit__, MockCalledOnceWith(None, None, None)) | ||
4582 | 329 | |||
4583 | 330 | def test_allocate_new_raises_when_addresses_exhausted(self): | 284 | def test_allocate_new_raises_when_addresses_exhausted(self): |
4584 | 331 | network = "192.168.230.0/24" | 285 | network = "192.168.230.0/24" |
4595 | 332 | with transaction.atomic(): | 286 | subnet = factory.make_Subnet(cidr=network) |
4596 | 333 | subnet = factory.make_Subnet(cidr=network) | 287 | factory.make_IPRange( |
4597 | 334 | factory.make_IPRange( | 288 | subnet, '192.168.230.1', '192.168.230.254', |
4598 | 335 | subnet, '192.168.230.1', '192.168.230.254', | 289 | type=IPRANGE_TYPE.RESERVED) |
4599 | 336 | type=IPRANGE_TYPE.RESERVED) | 290 | e = self.assertRaises( |
4600 | 337 | with transaction.atomic(): | 291 | StaticIPAddressExhaustion, |
4601 | 338 | e = self.assertRaises( | 292 | StaticIPAddress.objects.allocate_new, |
4602 | 339 | StaticIPAddressExhaustion, | 293 | subnet) |
4593 | 340 | StaticIPAddress.objects.allocate_new, | ||
4594 | 341 | subnet) | ||
4603 | 342 | self.assertEqual( | 294 | self.assertEqual( |
4604 | 343 | "No more IPs available in subnet: %s." % subnet.cidr, | 295 | "No more IPs available in subnet: %s." % subnet.cidr, |
4605 | 344 | str(e)) | 296 | str(e)) |
4606 | 345 | 297 | ||
4607 | 298 | def test_allocate_new_requests_retry_when_free_address_taken(self): | ||
4608 | 299 | set_ip_address = self.patch(StaticIPAddress, "set_ip_address") | ||
4609 | 300 | set_ip_address.side_effect = orm.make_unique_violation() | ||
4610 | 301 | with orm.retry_context: | ||
4611 | 302 | # A retry has been requested. | ||
4612 | 303 | self.assertRaises( | ||
4613 | 304 | orm.RetryTransaction, StaticIPAddress.objects.allocate_new, | ||
4614 | 305 | subnet=factory.make_managed_Subnet()) | ||
4615 | 306 | # Aquisition of `address_allocation` is pending. | ||
4616 | 307 | self.assertThat( | ||
4617 | 308 | list(orm.retry_context.stack._cm_pending), | ||
4618 | 309 | Equals([locks.address_allocation])) | ||
4619 | 310 | |||
4620 | 311 | def test_allocate_new_propagates_other_integrity_errors(self): | ||
4621 | 312 | set_ip_address = self.patch(StaticIPAddress, "set_ip_address") | ||
4622 | 313 | set_ip_address.side_effect = orm.make_unique_violation() | ||
4623 | 314 | set_ip_address.side_effect.__cause__.pgcode = FOREIGN_KEY_VIOLATION | ||
4624 | 315 | with orm.retry_context: | ||
4625 | 316 | # An integrity error that's not `UNIQUE_VIOLATION` is propagated. | ||
4626 | 317 | self.assertRaises( | ||
4627 | 318 | IntegrityError, StaticIPAddress.objects.allocate_new, | ||
4628 | 319 | subnet=factory.make_managed_Subnet()) | ||
4629 | 320 | # There is no pending retry context. | ||
4630 | 321 | self.assertThat( | ||
4631 | 322 | orm.retry_context.stack._cm_pending, | ||
4632 | 323 | HasLength(0)) | ||
4633 | 324 | |||
4634 | 325 | |||
4635 | 326 | class TestStaticIPAddressManagerTransactional(MAASTransactionServerTestCase): | ||
4636 | 327 | """Transactional tests for `StaticIPAddressManager.""" | ||
4637 | 328 | |||
4638 | 329 | scenarios = ( | ||
4639 | 330 | ("IPv4", dict(ip_version=4)), | ||
4640 | 331 | ("IPv6", dict(ip_version=6)), | ||
4641 | 332 | ) | ||
4642 | 333 | |||
4643 | 334 | def test_allocate_new_works_under_extreme_concurrency(self): | ||
4644 | 335 | register_view("maasserver_discovery") | ||
4645 | 336 | |||
4646 | 337 | ipv6 = (self.ip_version == 6) | ||
4647 | 338 | subnet = factory.make_managed_Subnet(ipv6=ipv6) | ||
4648 | 339 | count = 20 # Allocate this number of IP addresses. | ||
4649 | 340 | concurrency = threading.Semaphore(16) | ||
4650 | 341 | mutex = threading.Lock() | ||
4651 | 342 | results = [] | ||
4652 | 343 | |||
4653 | 344 | @transactional | ||
4654 | 345 | def allocate(): | ||
4655 | 346 | return StaticIPAddress.objects.allocate_new(subnet) | ||
4656 | 347 | |||
4657 | 348 | def allocate_one(): | ||
4658 | 349 | try: | ||
4659 | 350 | with concurrency: | ||
4660 | 351 | sip = allocate() | ||
4661 | 352 | except: | ||
4662 | 353 | failure = Failure() | ||
4663 | 354 | with mutex: | ||
4664 | 355 | results.append(failure) | ||
4665 | 356 | else: | ||
4666 | 357 | with mutex: | ||
4667 | 358 | results.append(sip) | ||
4668 | 359 | |||
4669 | 360 | threads = [ | ||
4670 | 361 | threading.Thread(target=allocate_one) | ||
4671 | 362 | for _ in range(count) | ||
4672 | 363 | ] | ||
4673 | 364 | |||
4674 | 365 | for thread in threads: | ||
4675 | 366 | thread.start() | ||
4676 | 367 | for thread in threads: | ||
4677 | 368 | thread.join() | ||
4678 | 369 | |||
4679 | 370 | self.assertThat(results, AllMatch(IsInstance(StaticIPAddress))) | ||
4680 | 371 | ips = {sip.ip for sip in results} | ||
4681 | 372 | self.assertThat(ips, HasLength(count)) | ||
4682 | 373 | self.assertThat(ips, AllMatch( | ||
4683 | 374 | AfterPreprocessing(subnet.is_valid_static_ip, Is(True)))) | ||
4684 | 375 | |||
4685 | 346 | 376 | ||
4686 | 347 | class TestStaticIPAddressManagerMapping(MAASServerTestCase): | 377 | class TestStaticIPAddressManagerMapping(MAASServerTestCase): |
4687 | 348 | """Tests for get_hostname_ip_mapping().""" | 378 | """Tests for get_hostname_ip_mapping().""" |
4688 | 349 | 379 | ||
4689 | === modified file 'src/maasserver/models/tests/test_subnet.py' | |||
4690 | --- src/maasserver/models/tests/test_subnet.py 2016-10-18 00:19:51 +0000 | |||
4691 | +++ src/maasserver/models/tests/test_subnet.py 2016-12-07 15:50:52 +0000 | |||
4692 | @@ -35,6 +35,7 @@ | |||
4693 | 35 | from maasserver.testing.factory import factory | 35 | from maasserver.testing.factory import factory |
4694 | 36 | from maasserver.testing.orm import rollback | 36 | from maasserver.testing.orm import rollback |
4695 | 37 | from maasserver.testing.testcase import MAASServerTestCase | 37 | from maasserver.testing.testcase import MAASServerTestCase |
4696 | 38 | from maasserver.utils.orm import reload_object | ||
4697 | 38 | from maastesting.matchers import DocTestMatches | 39 | from maastesting.matchers import DocTestMatches |
4698 | 39 | from netaddr import ( | 40 | from netaddr import ( |
4699 | 40 | AddrFormatError, | 41 | AddrFormatError, |
4700 | @@ -615,7 +616,7 @@ | |||
4701 | 615 | parent, subnet.get_smallest_enclosing_sane_subnet()) | 616 | parent, subnet.get_smallest_enclosing_sane_subnet()) |
4702 | 616 | 617 | ||
4703 | 617 | def test_cannot_delete_with_dhcp_enabled(self): | 618 | def test_cannot_delete_with_dhcp_enabled(self): |
4705 | 618 | subnet = factory.make_managed_Subnet(ipv6=False) | 619 | subnet = factory.make_ipv4_Subnet_with_IPRanges() |
4706 | 619 | with ExpectedException(ValidationError, ".*servicing a dynamic.*"): | 620 | with ExpectedException(ValidationError, ".*servicing a dynamic.*"): |
4707 | 620 | subnet.delete() | 621 | subnet.delete() |
4708 | 621 | 622 | ||
4709 | @@ -898,13 +899,37 @@ | |||
4710 | 898 | 899 | ||
4711 | 899 | class TestSubnetGetNextIPForAllocation(MAASServerTestCase): | 900 | class TestSubnetGetNextIPForAllocation(MAASServerTestCase): |
4712 | 900 | 901 | ||
4713 | 902 | scenarios = ( | ||
4714 | 903 | ("managed", {'managed': True}), | ||
4715 | 904 | ("unmanaged", {'managed': False}), | ||
4716 | 905 | ) | ||
4717 | 906 | |||
4718 | 907 | def make_Subnet(self, *args, **kwargs): | ||
4719 | 908 | """Helper to create a subnet for this test suite. | ||
4720 | 909 | |||
4721 | 910 | Eclipses the entire subnet with an IPRange of type RESERVED, so that | ||
4722 | 911 | unmanaged and managed test scenarios are expected to behave the same. | ||
4723 | 912 | """ | ||
4724 | 913 | cidr = kwargs.get('cidr') | ||
4725 | 914 | network = IPNetwork(cidr) | ||
4726 | 915 | # Note: these tests assume IPv4. | ||
4727 | 916 | first = str(IPAddress(network.first + 1)) | ||
4728 | 917 | last = str(IPAddress(network.last - 1)) | ||
4729 | 918 | subnet = factory.make_Subnet(*args, managed=self.managed, **kwargs) | ||
4730 | 919 | if not self.managed: | ||
4731 | 920 | factory.make_IPRange( | ||
4732 | 921 | subnet, start_ip=first, end_ip=last, | ||
4733 | 922 | type=IPRANGE_TYPE.RESERVED) | ||
4734 | 923 | subnet = reload_object(subnet) | ||
4735 | 924 | return subnet | ||
4736 | 925 | |||
4737 | 901 | def setUp(self): | 926 | def setUp(self): |
4738 | 902 | register_view("maasserver_discovery") | 927 | register_view("maasserver_discovery") |
4739 | 903 | return super().setUp() | 928 | return super().setUp() |
4740 | 904 | 929 | ||
4741 | 905 | def test__raises_if_no_free_addresses(self): | 930 | def test__raises_if_no_free_addresses(self): |
4742 | 906 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 931 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4744 | 907 | subnet = factory.make_Subnet( | 932 | subnet = self.make_Subnet( |
4745 | 908 | cidr="10.0.0.0/30", gateway_ip="10.0.0.1", | 933 | cidr="10.0.0.0/30", gateway_ip="10.0.0.1", |
4746 | 909 | dns_servers=["10.0.0.2"]) | 934 | dns_servers=["10.0.0.2"]) |
4747 | 910 | with ExpectedException( | 935 | with ExpectedException( |
4748 | @@ -914,35 +939,39 @@ | |||
4749 | 914 | 939 | ||
4750 | 915 | def test__allocates_next_free_address(self): | 940 | def test__allocates_next_free_address(self): |
4751 | 916 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 941 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4754 | 917 | subnet = factory.make_Subnet( | 942 | subnet = self.make_Subnet( |
4755 | 918 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) | 943 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None, |
4756 | 944 | ) | ||
4757 | 919 | ip = subnet.get_next_ip_for_allocation() | 945 | ip = subnet.get_next_ip_for_allocation() |
4758 | 920 | self.assertThat(ip, Equals("10.0.0.1")) | 946 | self.assertThat(ip, Equals("10.0.0.1")) |
4759 | 921 | 947 | ||
4760 | 922 | def test__avoids_gateway_ip(self): | 948 | def test__avoids_gateway_ip(self): |
4761 | 923 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 949 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4764 | 924 | subnet = factory.make_Subnet( | 950 | subnet = self.make_Subnet( |
4765 | 925 | cidr="10.0.0.0/30", gateway_ip="10.0.0.1", dns_servers=None) | 951 | cidr="10.0.0.0/30", gateway_ip="10.0.0.1", dns_servers=None, |
4766 | 952 | ) | ||
4767 | 926 | ip = subnet.get_next_ip_for_allocation() | 953 | ip = subnet.get_next_ip_for_allocation() |
4768 | 927 | self.assertThat(ip, Equals("10.0.0.2")) | 954 | self.assertThat(ip, Equals("10.0.0.2")) |
4769 | 928 | 955 | ||
4770 | 929 | def test__avoids_excluded_addresses(self): | 956 | def test__avoids_excluded_addresses(self): |
4771 | 930 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 957 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4772 | 931 | subnet = factory.make_Subnet( | 958 | subnet = factory.make_Subnet( |
4774 | 932 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) | 959 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None, |
4775 | 960 | ) | ||
4776 | 933 | ip = subnet.get_next_ip_for_allocation(exclude_addresses=["10.0.0.1"]) | 961 | ip = subnet.get_next_ip_for_allocation(exclude_addresses=["10.0.0.1"]) |
4777 | 934 | self.assertThat(ip, Equals("10.0.0.2")) | 962 | self.assertThat(ip, Equals("10.0.0.2")) |
4778 | 935 | 963 | ||
4779 | 936 | def test__avoids_dns_servers(self): | 964 | def test__avoids_dns_servers(self): |
4780 | 937 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 965 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4781 | 938 | subnet = factory.make_Subnet( | 966 | subnet = factory.make_Subnet( |
4783 | 939 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=["10.0.0.1"]) | 967 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=["10.0.0.1"], |
4784 | 968 | ) | ||
4785 | 940 | ip = subnet.get_next_ip_for_allocation() | 969 | ip = subnet.get_next_ip_for_allocation() |
4786 | 941 | self.assertThat(ip, Equals("10.0.0.2")) | 970 | self.assertThat(ip, Equals("10.0.0.2")) |
4787 | 942 | 971 | ||
4788 | 943 | def test__avoids_observed_neighbours(self): | 972 | def test__avoids_observed_neighbours(self): |
4789 | 944 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 973 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4791 | 945 | subnet = factory.make_Subnet( | 974 | subnet = self.make_Subnet( |
4792 | 946 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) | 975 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) |
4793 | 947 | rackif = factory.make_Interface(vlan=subnet.vlan) | 976 | rackif = factory.make_Interface(vlan=subnet.vlan) |
4794 | 948 | factory.make_Discovery(ip="10.0.0.1", interface=rackif) | 977 | factory.make_Discovery(ip="10.0.0.1", interface=rackif) |
4795 | @@ -951,7 +980,7 @@ | |||
4796 | 951 | 980 | ||
4797 | 952 | def test__logs_if_suggests_previously_observed_neighbour(self): | 981 | def test__logs_if_suggests_previously_observed_neighbour(self): |
4798 | 953 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. | 982 | # Note: 10.0.0.0/30 --> 10.0.0.1 and 10.0.0.0.2 are usable. |
4800 | 954 | subnet = factory.make_Subnet( | 983 | subnet = self.make_Subnet( |
4801 | 955 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) | 984 | cidr="10.0.0.0/30", gateway_ip=None, dns_servers=None) |
4802 | 956 | rackif = factory.make_Interface(vlan=subnet.vlan) | 985 | rackif = factory.make_Interface(vlan=subnet.vlan) |
4803 | 957 | now = datetime.now() | 986 | now = datetime.now() |
4804 | @@ -969,7 +998,7 @@ | |||
4805 | 969 | 998 | ||
4806 | 970 | def test__uses_smallest_free_range_when_not_considering_neighbours(self): | 999 | def test__uses_smallest_free_range_when_not_considering_neighbours(self): |
4807 | 971 | # Note: 10.0.0.0/29 --> 10.0.0.1 through 10.0.0.0.6 are usable. | 1000 | # Note: 10.0.0.0/29 --> 10.0.0.1 through 10.0.0.0.6 are usable. |
4809 | 972 | subnet = factory.make_Subnet( | 1001 | subnet = self.make_Subnet( |
4810 | 973 | cidr="10.0.0.0/29", gateway_ip=None, dns_servers=None) | 1002 | cidr="10.0.0.0/29", gateway_ip=None, dns_servers=None) |
4811 | 974 | # With .4 in use, the free ranges are {1, 2, 3}, {5, 6}. So MAAS should | 1003 | # With .4 in use, the free ranges are {1, 2, 3}, {5, 6}. So MAAS should |
4812 | 975 | # select 10.0.0.5, since that is the first address in the smallest | 1004 | # select 10.0.0.5, since that is the first address in the smallest |
4813 | @@ -977,3 +1006,48 @@ | |||
4814 | 977 | factory.make_StaticIPAddress(ip="10.0.0.4", cidr="10.0.0.0/29") | 1006 | factory.make_StaticIPAddress(ip="10.0.0.4", cidr="10.0.0.0/29") |
4815 | 978 | ip = subnet.get_next_ip_for_allocation() | 1007 | ip = subnet.get_next_ip_for_allocation() |
4816 | 979 | self.assertThat(ip, Equals("10.0.0.5")) | 1008 | self.assertThat(ip, Equals("10.0.0.5")) |
4817 | 1009 | |||
4818 | 1010 | |||
4819 | 1011 | class TestUnmanagedSubnets(MAASServerTestCase): | ||
4820 | 1012 | def setUp(self): | ||
4821 | 1013 | register_view("maasserver_discovery") | ||
4822 | 1014 | return super().setUp() | ||
4823 | 1015 | |||
4824 | 1016 | def test__allocation_uses_reserved_range(self): | ||
4825 | 1017 | # Note: 10.0.0.0/29 --> 10.0.0.1 through 10.0.0.0.6 are usable. | ||
4826 | 1018 | subnet = factory.make_Subnet( | ||
4827 | 1019 | cidr="10.0.0.0/29", gateway_ip=None, dns_servers=None, | ||
4828 | 1020 | managed=False) | ||
4829 | 1021 | range1 = factory.make_IPRange( | ||
4830 | 1022 | subnet, start_ip='10.0.0.1', end_ip='10.0.0.1', | ||
4831 | 1023 | type=IPRANGE_TYPE.RESERVED) | ||
4832 | 1024 | subnet = reload_object(subnet) | ||
4833 | 1025 | ip = subnet.get_next_ip_for_allocation() | ||
4834 | 1026 | self.assertThat(ip, Equals("10.0.0.1")) | ||
4835 | 1027 | range1.delete() | ||
4836 | 1028 | factory.make_IPRange( | ||
4837 | 1029 | subnet, start_ip='10.0.0.6', end_ip='10.0.0.6', | ||
4838 | 1030 | type=IPRANGE_TYPE.RESERVED) | ||
4839 | 1031 | subnet = reload_object(subnet) | ||
4840 | 1032 | ip = subnet.get_next_ip_for_allocation() | ||
4841 | 1033 | self.assertThat(ip, Equals("10.0.0.6")) | ||
4842 | 1034 | |||
4843 | 1035 | def test__allocation_uses_multiple_reserved_ranges(self): | ||
4844 | 1036 | # Note: 10.0.0.0/29 --> 10.0.0.1 through 10.0.0.0.6 are usable. | ||
4845 | 1037 | subnet = factory.make_Subnet( | ||
4846 | 1038 | cidr="10.0.0.0/29", gateway_ip=None, dns_servers=None, | ||
4847 | 1039 | managed=False) | ||
4848 | 1040 | factory.make_IPRange( | ||
4849 | 1041 | subnet, start_ip='10.0.0.3', end_ip='10.0.0.4', | ||
4850 | 1042 | type=IPRANGE_TYPE.RESERVED) | ||
4851 | 1043 | subnet = reload_object(subnet) | ||
4852 | 1044 | ip = subnet.get_next_ip_for_allocation() | ||
4853 | 1045 | self.assertThat(ip, Equals("10.0.0.3")) | ||
4854 | 1046 | factory.make_StaticIPAddress(ip) | ||
4855 | 1047 | ip = subnet.get_next_ip_for_allocation() | ||
4856 | 1048 | self.assertThat(ip, Equals("10.0.0.4")) | ||
4857 | 1049 | factory.make_StaticIPAddress(ip) | ||
4858 | 1050 | with ExpectedException( | ||
4859 | 1051 | StaticIPAddressExhaustion, | ||
4860 | 1052 | "No more IPs available in subnet: 10.0.0.0/29."): | ||
4861 | 1053 | subnet.get_next_ip_for_allocation() | ||
4862 | 980 | 1054 | ||
4863 | === modified file 'src/maasserver/models/tests/test_vlan.py' | |||
4864 | --- src/maasserver/models/tests/test_vlan.py 2016-10-19 18:06:01 +0000 | |||
4865 | +++ src/maasserver/models/tests/test_vlan.py 2016-12-07 15:50:52 +0000 | |||
4866 | @@ -88,6 +88,14 @@ | |||
4867 | 88 | 88 | ||
4868 | 89 | class TestVLAN(MAASServerTestCase): | 89 | class TestVLAN(MAASServerTestCase): |
4869 | 90 | 90 | ||
4870 | 91 | def test_delete_relay_vlan_doesnt_delete_vlan(self): | ||
4871 | 92 | relay_vlan = factory.make_VLAN() | ||
4872 | 93 | vlan = factory.make_VLAN(relay_vlan=relay_vlan) | ||
4873 | 94 | relay_vlan.delete() | ||
4874 | 95 | vlan = reload_object(vlan) | ||
4875 | 96 | self.assertIsNotNone(vlan) | ||
4876 | 97 | self.assertIsNone(vlan.relay_vlan) | ||
4877 | 98 | |||
4878 | 91 | def test_get_name_for_default_vlan_is_untagged(self): | 99 | def test_get_name_for_default_vlan_is_untagged(self): |
4879 | 92 | fabric = factory.make_Fabric() | 100 | fabric = factory.make_Fabric() |
4880 | 93 | self.assertEqual("untagged", fabric.get_default_vlan().get_name()) | 101 | self.assertEqual("untagged", fabric.get_default_vlan().get_name()) |
4881 | 94 | 102 | ||
4882 | === modified file 'src/maasserver/models/vlan.py' | |||
4883 | --- src/maasserver/models/vlan.py 2016-10-20 19:39:48 +0000 | |||
4884 | +++ src/maasserver/models/vlan.py 2016-12-07 15:50:52 +0000 | |||
4885 | @@ -14,6 +14,7 @@ | |||
4886 | 14 | from django.db.models import ( | 14 | from django.db.models import ( |
4887 | 15 | BooleanField, | 15 | BooleanField, |
4888 | 16 | CharField, | 16 | CharField, |
4889 | 17 | deletion, | ||
4890 | 17 | ForeignKey, | 18 | ForeignKey, |
4891 | 18 | IntegerField, | 19 | IntegerField, |
4892 | 19 | Manager, | 20 | Manager, |
4893 | @@ -169,6 +170,10 @@ | |||
4894 | 169 | 'RackController', null=True, blank=True, editable=True, | 170 | 'RackController', null=True, blank=True, editable=True, |
4895 | 170 | related_name='+') | 171 | related_name='+') |
4896 | 171 | 172 | ||
4897 | 173 | relay_vlan = ForeignKey( | ||
4898 | 174 | 'self', null=True, blank=True, editable=True, | ||
4899 | 175 | related_name='relay_vlans', on_delete=deletion.SET_NULL) | ||
4900 | 176 | |||
4901 | 172 | def __str__(self): | 177 | def __str__(self): |
4902 | 173 | return "%s.%s" % (self.fabric.get_name(), self.get_name()) | 178 | return "%s.%s" % (self.fabric.get_name(), self.get_name()) |
4903 | 174 | 179 | ||
4904 | 175 | 180 | ||
4905 | === modified file 'src/maasserver/node_action.py' | |||
4906 | --- src/maasserver/node_action.py 2016-08-16 09:31:16 +0000 | |||
4907 | +++ src/maasserver/node_action.py 2016-12-07 15:50:52 +0000 | |||
4908 | @@ -230,10 +230,6 @@ | |||
4909 | 230 | self, enable_ssh=False, skip_networking=False, | 230 | self, enable_ssh=False, skip_networking=False, |
4910 | 231 | skip_storage=False): | 231 | skip_storage=False): |
4911 | 232 | """See `NodeAction.execute`.""" | 232 | """See `NodeAction.execute`.""" |
4912 | 233 | if self.node.power_state == POWER_STATE.ON: | ||
4913 | 234 | raise NodeActionError( | ||
4914 | 235 | "Unable to be commissioned because the power is currently on.") | ||
4915 | 236 | |||
4916 | 237 | try: | 233 | try: |
4917 | 238 | self.node.start_commissioning( | 234 | self.node.start_commissioning( |
4918 | 239 | self.user, | 235 | self.user, |
4919 | 240 | 236 | ||
4920 | === modified file 'src/maasserver/rpc/nodes.py' | |||
4921 | --- src/maasserver/rpc/nodes.py 2016-10-20 08:41:30 +0000 | |||
4922 | +++ src/maasserver/rpc/nodes.py 2016-12-07 15:50:52 +0000 | |||
4923 | @@ -30,6 +30,7 @@ | |||
4924 | 30 | ) | 30 | ) |
4925 | 31 | from maasserver.models.timestampedmodel import now | 31 | from maasserver.models.timestampedmodel import now |
4926 | 32 | from maasserver.utils.orm import transactional | 32 | from maasserver.utils.orm import transactional |
4927 | 33 | from provisioningserver.drivers.power import PowerDriverRegistry | ||
4928 | 33 | from provisioningserver.rpc.exceptions import ( | 34 | from provisioningserver.rpc.exceptions import ( |
4929 | 34 | CommissionNodeFailed, | 35 | CommissionNodeFailed, |
4930 | 35 | NodeAlreadyExists, | 36 | NodeAlreadyExists, |
4931 | @@ -66,16 +67,16 @@ | |||
4932 | 66 | :return: A generator yielding `dict`s. | 67 | :return: A generator yielding `dict`s. |
4933 | 67 | """ | 68 | """ |
4934 | 68 | five_minutes_ago = now() - timedelta(minutes=5) | 69 | five_minutes_ago = now() - timedelta(minutes=5) |
4940 | 69 | 70 | queryable_power_types = [ | |
4941 | 70 | # This is meant to be temporary until all the power types support querying | 71 | driver.name |
4942 | 71 | # the power state of a node. See the definition of QUERY_POWER_TYPES for | 72 | for _, driver in PowerDriverRegistry |
4943 | 72 | # more information. | 73 | if driver.queryable |
4944 | 73 | from provisioningserver.power import QUERY_POWER_TYPES | 74 | ] |
4945 | 74 | 75 | ||
4946 | 75 | nodes_unchecked = ( | 76 | nodes_unchecked = ( |
4947 | 76 | nodes | 77 | nodes |
4948 | 77 | .filter(power_state_queried=None) | 78 | .filter(power_state_queried=None) |
4950 | 78 | .filter(bmc__power_type__in=QUERY_POWER_TYPES) | 79 | .filter(bmc__power_type__in=queryable_power_types) |
4951 | 79 | .exclude(status=NODE_STATUS.BROKEN) | 80 | .exclude(status=NODE_STATUS.BROKEN) |
4952 | 80 | .distinct() | 81 | .distinct() |
4953 | 81 | ) | 82 | ) |
4954 | @@ -83,7 +84,7 @@ | |||
4955 | 83 | nodes | 84 | nodes |
4956 | 84 | .exclude(power_state_queried=None) | 85 | .exclude(power_state_queried=None) |
4957 | 85 | .exclude(power_state_queried__gt=five_minutes_ago) | 86 | .exclude(power_state_queried__gt=five_minutes_ago) |
4959 | 86 | .filter(bmc__power_type__in=QUERY_POWER_TYPES) | 87 | .filter(bmc__power_type__in=queryable_power_types) |
4960 | 87 | .exclude(status=NODE_STATUS.BROKEN) | 88 | .exclude(status=NODE_STATUS.BROKEN) |
4961 | 88 | .order_by("power_state_queried", "system_id") | 89 | .order_by("power_state_queried", "system_id") |
4962 | 89 | .distinct() | 90 | .distinct() |
4963 | 90 | 91 | ||
4964 | === modified file 'src/maasserver/rpc/rackcontrollers.py' | |||
4965 | --- src/maasserver/rpc/rackcontrollers.py 2016-10-17 06:42:10 +0000 | |||
4966 | +++ src/maasserver/rpc/rackcontrollers.py 2016-12-07 15:50:52 +0000 | |||
4967 | @@ -26,7 +26,6 @@ | |||
4968 | 26 | RegionController, | 26 | RegionController, |
4969 | 27 | StaticIPAddress, | 27 | StaticIPAddress, |
4970 | 28 | ) | 28 | ) |
4971 | 29 | from maasserver.models.node import typecast_node | ||
4972 | 30 | from maasserver.models.timestampedmodel import now | 29 | from maasserver.models.timestampedmodel import now |
4973 | 31 | from maasserver.utils import synchronised | 30 | from maasserver.utils import synchronised |
4974 | 32 | from maasserver.utils.orm import ( | 31 | from maasserver.utils.orm import ( |
4975 | @@ -120,7 +119,7 @@ | |||
4976 | 120 | node.node_type = NODE_TYPE.RACK_CONTROLLER | 119 | node.node_type = NODE_TYPE.RACK_CONTROLLER |
4977 | 121 | node.save() | 120 | node.save() |
4978 | 122 | 121 | ||
4980 | 123 | rackcontroller = typecast_node(node, RackController) | 122 | rackcontroller = node.as_rack_controller() |
4981 | 124 | 123 | ||
4982 | 125 | # Update `rackcontroller.url` from the given URL, if it has changed. | 124 | # Update `rackcontroller.url` from the given URL, if it has changed. |
4983 | 126 | update_fields = [] | 125 | update_fields = [] |
4984 | 127 | 126 | ||
4985 | === modified file 'src/maasserver/rpc/regionservice.py' | |||
4986 | --- src/maasserver/rpc/regionservice.py 2016-10-28 15:58:32 +0000 | |||
4987 | +++ src/maasserver/rpc/regionservice.py 2016-12-07 15:50:52 +0000 | |||
4988 | @@ -642,19 +642,17 @@ | |||
4989 | 642 | # and into the database. | 642 | # and into the database. |
4990 | 643 | self.ident = rack_controller.system_id | 643 | self.ident = rack_controller.system_id |
4991 | 644 | self.factory.service._addConnectionFor(self.ident, self) | 644 | self.factory.service._addConnectionFor(self.ident, self) |
4992 | 645 | |||
4993 | 646 | # A local rack is treated differently to one that's remote. | ||
4994 | 645 | self.host = self.transport.getHost() | 647 | self.host = self.transport.getHost() |
4995 | 646 | self.hostIsRemote = isinstance( | 648 | self.hostIsRemote = isinstance( |
4996 | 647 | self.host, (IPv4Address, IPv6Address)) | 649 | self.host, (IPv4Address, IPv6Address)) |
4997 | 648 | 650 | ||
5000 | 649 | # Get the region ID if we're dealing with a non-local rack; we | 651 | # Only register the connection into the database when it's a valid |
4999 | 650 | # won't need to bother for local racks. |
The diff has been truncated for viewing.