~ubuntu-branches/ubuntu/utopic/maas/utopic-security

Viewing changes to src/maasserver/models/tests/test_node.py

  • Committer: Package Import Robot
  • Author(s): Julian Edwards, Julian Edwards, Andres Rodriguez
  • Date: 2014-08-21 18:38:27 UTC
  • mfrom: (1.2.34)
  • Revision ID: package-import@ubuntu.com-20140821183827-9xyb5u2o4l8g3zxj
Tags: 1.6.1+bzr2550-0ubuntu1
* New upstream bugfix release:
  - Auto-link node MACs to Networks (LP: #1341619)

[ Julian Edwards ]
* debian/maas-region-controller.postinst: Don't restart RabbitMQ on
  upgrades, just ensure it's running.  Should prevent a race with the
  cluster celery restarting.
* debian/rules: Pull upstream branch from the right place.

[ Andres Rodriguez ]
* debian/maas-region-controller.postinst: Ensure cluster celery is
  started if it also runs on the region.

=== modified file 'src/maasserver/models/tests/test_node.py'
--- src/maasserver/models/tests/test_node.py
+++ src/maasserver/models/tests/test_node.py
@@ -17 +17 @@
 from datetime import timedelta
 import random

+import celery
 from django.core.exceptions import ValidationError
+from maasserver import dns as dns_module
 from maasserver.clusterrpc.power_parameters import get_power_types
 from maasserver.enum import (
-    DISTRO_SERIES,
+    IPADDRESS_TYPE,
     NODE_PERMISSION,
     NODE_STATUS,
     NODE_STATUS_CHOICES,
@@ -28 +30 @@
     NODEGROUP_STATUS,
     NODEGROUPINTERFACE_MANAGEMENT,
     )
-from maasserver.exceptions import NodeStateViolation
+from maasserver.exceptions import (
+    NodeStateViolation,
+    StaticIPAddressExhaustion,
+    )
 from maasserver.fields import MAC
 from maasserver.models import (
     Config,
+    LicenseKey,
     MACAddress,
     Node,
     node as node_module,
@@ -40 +46 @@
 from maasserver.models.node import (
     generate_hostname,
     NODE_TRANSITIONS,
+    validate_hostname,
+    )
+from maasserver.models.staticipaddress import (
+    StaticIPAddress,
+    StaticIPAddressManager,
     )
 from maasserver.models.user import create_auth_token
 from maasserver.testing import reload_object
 from maasserver.testing.factory import factory
 from maasserver.testing.testcase import MAASServerTestCase
-from maasserver.utils import map_enum
+from maasserver.utils import (
+    ignore_unused,
+    map_enum,
+    )
 from maastesting.djangotestcase import count_queries
+from maastesting.matchers import (
+    MockCalledOnceWith,
+    MockNotCalled,
+    )
 from maastesting.testcase import MAASTestCase
 from metadataserver import commissioning
 from metadataserver.fields import Bin
@@ -55 +73 @@
     NodeUserData,
     )
 from provisioningserver.power.poweraction import PowerAction
+from provisioningserver.tasks import Omshell
 from testtools.matchers import (
     AllMatch,
     Contains,
     Equals,
     MatchesAll,
-    MatchesListwise,
     Not,
     )

@@ -83 +101 @@
         self.assertEqual(sizes, [len(hostname) for hostname in hostnames])


+class TestHostnameValidator(MAASTestCase):
+    """Tests for the validation of hostnames.
+
+    Specifications based on:
+        http://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
+
+    This does not support Internationalized Domain Names.  To do so, we'd have
+    to accept and store unicode, but use the Punycode-encoded version.  The
+    validator would have to validate both versions: the unicode input for
+    invalid characters, and the encoded version for length.
+    """
+    def make_maximum_hostname(self):
+        """Create a hostname of the maximum permitted length.
+
+        The maximum permitted length is 255 characters.  The last label in the
+        hostname will not be of the maximum length, so tests can still append a
+        character to it without creating an invalid label.
+
+        The hostname is not randomised, so do not count on it being unique.
+        """
+        # A hostname may contain any number of labels, separated by dots.
+        # Each of the labels has a maximum length of 63 characters, so this has
+        # to be built up from multiple labels.
+        ten_chars = ('a' * 9) + '.'
+        hostname = ten_chars * 25 + ('b' * 5)
+        self.assertEqual(255, len(hostname))
+        return hostname
+
+    def assertAccepts(self, hostname):
+        """Assertion: the validator accepts `hostname`."""
+        try:
+            validate_hostname(hostname)
+        except ValidationError as e:
+            raise AssertionError(unicode(e))
+
+    def assertRejects(self, hostname):
+        """Assertion: the validator rejects `hostname`."""
+        self.assertRaises(ValidationError, validate_hostname, hostname)
+
+    def test_accepts_ascii_letters(self):
+        self.assertAccepts('abcde')
+
+    def test_accepts_dots(self):
+        self.assertAccepts('abc.def')
+
+    def test_rejects_adjacent_dots(self):
+        self.assertRejects('abc..def')
+
+    def test_rejects_leading_dot(self):
+        self.assertRejects('.abc')
+
+    def test_rejects_trailing_dot(self):
+        self.assertRejects('abc.')
+
+    def test_accepts_ascii_digits(self):
+        self.assertAccepts('abc123')
+
+    def test_accepts_leading_digits(self):
+        # Leading digits used to be forbidden, but are now allowed.
+        self.assertAccepts('123abc')
+
+    def test_rejects_whitespace(self):
+        self.assertRejects('a b')
+        self.assertRejects('a\nb')
+        self.assertRejects('a\tb')
+
+    def test_rejects_other_ascii_characters(self):
+        self.assertRejects('a?b')
+        self.assertRejects('a!b')
+        self.assertRejects('a,b')
+        self.assertRejects('a:b')
+        self.assertRejects('a;b')
+        self.assertRejects('a+b')
+        self.assertRejects('a=b')
+
+    def test_accepts_underscore_in_domain(self):
+        self.assertAccepts('host.local_domain')
+
+    def test_rejects_underscore_in_host(self):
+        self.assertRejects('host_name.local')
+
+    def test_accepts_hyphen(self):
+        self.assertAccepts('a-b')
+
+    def test_rejects_hyphen_at_start_of_label(self):
+        self.assertRejects('-ab')
+
+    def test_rejects_hyphen_at_end_of_label(self):
+        self.assertRejects('ab-')
+
+    def test_accepts_maximum_valid_length(self):
+        self.assertAccepts(self.make_maximum_hostname())
+
+    def test_rejects_oversized_hostname(self):
+        self.assertRejects(self.make_maximum_hostname() + 'x')
+
+    def test_accepts_maximum_label_length(self):
+        self.assertAccepts('a' * 63)
+
+    def test_rejects_oversized_label(self):
+        self.assertRejects('b' * 64)
+
+    def test_rejects_nonascii_letter(self):
+        # The \u03be is the Greek letter xi.  Perfectly good letter, just not
+        # ASCII.
+        self.assertRejects('\u03be')
+
+
+def make_active_lease(nodegroup=None):
+    """Create a `DHCPLease` on a managed `NodeGroupInterface`."""
+    lease = factory.make_dhcp_lease(nodegroup=nodegroup)
+    factory.make_node_group_interface(
+        lease.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
+    return lease
+
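The TestHostnameValidator cases added above pin down the rules described in the
class docstring: ASCII labels of at most 63 characters separated by dots, no
leading or trailing hyphens, underscores tolerated only after the first label,
and an overall limit of 255 characters. As a reading aid, here is a minimal
standalone sketch of a validator consistent with those assertions; it is not
the MAAS validate_hostname implementation, and validate_hostname_sketch and its
regexes are illustrative names only.

import re

# Hypothetical sketch: the first label is host-like (no underscores), later
# labels may also contain underscores, matching the tests above.
HOST_LABEL = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$')
DOMAIN_LABEL = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9_-]*[a-zA-Z0-9])?$')


def validate_hostname_sketch(hostname):
    """Raise ValueError if `hostname` breaks the rules the tests assert."""
    if len(hostname) > 255:
        raise ValueError("Hostname exceeds 255 characters: %r" % hostname)
    labels = hostname.split('.')
    if any(len(label) == 0 or len(label) > 63 for label in labels):
        raise ValueError("Empty or oversized label in %r" % hostname)
    patterns = [HOST_LABEL] + [DOMAIN_LABEL] * (len(labels) - 1)
    for label, pattern in zip(labels, patterns):
        if pattern.match(label) is None:
            raise ValueError("Invalid label %r in %r" % (label, hostname))


validate_hostname_sketch('host.local_domain')   # accepted, like the test above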
 
 class NodeTest(MAASServerTestCase):

     def test_system_id(self):
@@ -94 +228 @@
         self.assertEqual(len(node.system_id), 41)
         self.assertTrue(node.system_id.startswith('node-'))

+    def test_hostname_is_validated(self):
+        bad_hostname = '-_?!@*-'
+        self.assertRaises(
+            ValidationError,
+            factory.make_node, hostname=bad_hostname)
+
     def test_work_queue_returns_nodegroup_uuid(self):
         nodegroup = factory.make_node_group()
         node = factory.make_node(nodegroup=nodegroup)
@@ -155 +295 @@
             offset += timedelta(1)
         self.assertEqual(macs[0], node.get_primary_mac().mac_address)

+    def test_get_osystem_returns_default_osystem(self):
+        node = factory.make_node(osystem='')
+        osystem = Config.objects.get_config('default_osystem')
+        self.assertEqual(osystem, node.get_osystem())
+
     def test_get_distro_series_returns_default_series(self):
-        node = factory.make_node()
-        series = Config.objects.get_config('commissioning_distro_series')
-        self.assertEqual(series, node.get_distro_series())
-
-    def test_set_get_distro_series_returns_series(self):
-        node = factory.make_node()
-        series = DISTRO_SERIES.quantal
-        node.set_distro_series(series)
-        self.assertEqual(series, node.get_distro_series())
+        node = factory.make_node(distro_series='')
+        series = Config.objects.get_config('default_distro_series')
+        self.assertEqual(series, node.get_distro_series())
+
+    def test_get_effective_license_key_returns_node_value(self):
+        license_key = factory.make_name('license_key')
+        node = factory.make_node(license_key=license_key)
+        self.assertEqual(license_key, node.get_effective_license_key())
+
+    def test_get_effective_license_key_returns_blank(self):
+        node = factory.make_node()
+        self.assertEqual('', node.get_effective_license_key())
+
+    def test_get_effective_license_key_returns_global(self):
+        license_key = factory.make_name('license_key')
+        osystem = factory.make_name('os')
+        series = factory.make_name('series')
+        LicenseKey.objects.create(
+            osystem=osystem, distro_series=series, license_key=license_key)
+        node = factory.make_node(osystem=osystem, distro_series=series)
+        self.assertEqual(license_key, node.get_effective_license_key())

     def test_delete_node_deletes_related_mac(self):
         node = factory.make_node()
@@ -177 +334 @@
         node = factory.make_node(status=NODE_STATUS.ALLOCATED)
         self.assertRaises(NodeStateViolation, node.delete)

+    def test_delete_node_also_deletes_related_static_IPs(self):
+        # Prevent actual omshell commands from being called in tasks.
+        self.patch(Omshell, 'remove')
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        primary_mac = node.get_primary_mac()
+        random_alloc_type = factory.getRandomEnum(
+            IPADDRESS_TYPE, but_not=[IPADDRESS_TYPE.USER_RESERVED])
+        primary_mac.claim_static_ip(alloc_type=random_alloc_type)
+        node.delete()
+        self.assertItemsEqual([], StaticIPAddress.objects.all())
+
+    def test_delete_node_also_runs_task_to_delete_static_dhcp_maps(self):
+        # Prevent actual omshell commands from being called in tasks.
+        self.patch(Omshell, 'remove')
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        primary_mac = node.get_primary_mac()
+        primary_mac.claim_static_ip(alloc_type=IPADDRESS_TYPE.STICKY)
+        node.delete()
+        self.assertEqual(
+            ['provisioningserver.tasks.remove_dhcp_host_map'],
+            [task['task'].name for task in self.celery.tasks])
+
     def test_delete_node_also_deletes_dhcp_host_map(self):
-        lease = factory.make_dhcp_lease()
+        lease = make_active_lease()
         node = factory.make_node(nodegroup=lease.nodegroup)
         node.add_mac_address(lease.mac)
-        mocked_task = self.patch(node_module, "remove_dhcp_host_map")
-        mocked_apply_async = self.patch(mocked_task, "apply_async")
+        # Prevent actual omshell commands from being called in the task.
+        self.patch(Omshell, 'remove')
         node.delete()
-        args, kwargs = mocked_apply_async.call_args
-        expected = (
-            Equals(kwargs['queue']),
+        self.assertThat(
+            self.celery.tasks[0]['kwargs'],
             Equals({
                 'ip_address': lease.ip,
                 'server_address': "127.0.0.1",
                 'omapi_key': lease.nodegroup.dhcp_key,
                 }))
-        observed = node.work_queue, kwargs['kwargs']
-        self.assertThat(observed, MatchesListwise(expected))
+
+    def test_delete_dynamic_host_maps_sends_to_correct_queue(self):
+        lease = make_active_lease()
+        node = factory.make_node(nodegroup=lease.nodegroup)
+        node.add_mac_address(lease.mac)
+        # Prevent actual omshell commands from being called in the task.
+        self.patch(Omshell, 'remove')
+        option_call = self.patch(celery.canvas.Signature, 'set')
+        work_queue = node.work_queue
+        node.delete()
+        args, kwargs = option_call.call_args
+        self.assertEqual(work_queue, kwargs['queue'])

     def test_delete_node_removes_multiple_host_maps(self):
-        lease1 = factory.make_dhcp_lease()
-        lease2 = factory.make_dhcp_lease(nodegroup=lease1.nodegroup)
+        lease1 = make_active_lease()
+        lease2 = make_active_lease(nodegroup=lease1.nodegroup)
         node = factory.make_node(nodegroup=lease1.nodegroup)
         node.add_mac_address(lease1.mac)
         node.add_mac_address(lease2.mac)
-        mocked_task = self.patch(node_module, "remove_dhcp_host_map")
-        mocked_apply_async = self.patch(mocked_task, "apply_async")
+        # Prevent actual omshell commands from being called in the task.
+        self.patch(Omshell, 'remove')
         node.delete()
-        self.assertEqual(2, mocked_apply_async.call_count)
+        self.assertEqual(2, len(self.celery.tasks))

     def test_set_random_hostname_set_hostname(self):
         # Blank out enlistment_domain.
@@ -217 +405 @@
         Config.objects.set_config("enlistment_domain", '')
         existing_node = factory.make_node(hostname='hostname')

-        hostnames = [existing_node.hostname, "new_hostname"]
+        hostnames = [existing_node.hostname, "new-hostname"]
         self.patch(
             node_module, "generate_hostname",
             lambda size: hostnames.pop(0))

         node = factory.make_node()
         node.set_random_hostname()
-        self.assertEqual('new_hostname', node.hostname)
+        self.assertEqual('new-hostname', node.hostname)

     def test_get_effective_power_type_raises_if_not_set(self):
         node = factory.make_node(power_type='')
@@ -303 +491 @@
         successful_types = [node_power_types[node] for node in started_nodes]
         self.assertItemsEqual(configless_power_types, successful_types)

+    def test_get_effective_power_type_no_default_power_address_if_not_virsh(
+            self):
+        node = factory.make_node(power_type="ether_wake")
+        params = node.get_effective_power_parameters()
+        self.assertEqual("", params["power_address"])
+
+    def test_get_effective_power_type_defaults_power_address_if_virsh(self):
+        node = factory.make_node(power_type="virsh")
+        params = node.get_effective_power_parameters()
+        self.assertEqual("qemu://localhost/system", params["power_address"])
+
     def test_get_effective_kernel_options_with_nothing_set(self):
         node = factory.make_node()
         self.assertEqual((None, None), node.get_effective_kernel_options())
@@ -406 +605 @@
             (NODE_STATUS.READY, None, node.agent_name),
             (node.status, node.owner, ''))

-    def test_ip_addresses_queries_leases(self):
+    def test_release_deletes_static_ip_host_maps(self):
+        user = factory.make_user()
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            owner=user, status=NODE_STATUS.ALLOCATED)
+        sip = node.get_primary_mac().claim_static_ip()
+        delete_static_host_maps = self.patch(node, 'delete_static_host_maps')
+        node.release()
+        expected = [sip.ip.format()]
+        self.assertThat(delete_static_host_maps, MockCalledOnceWith(expected))
+
+    def test_delete_static_host_maps(self):
+        user = factory.make_user()
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            owner=user, status=NODE_STATUS.ALLOCATED)
+        sip = node.get_primary_mac().claim_static_ip()
+        self.patch(Omshell, 'remove')
+        set_call = self.patch(celery.canvas.Signature, 'set')
+        node.delete_static_host_maps([sip.ip.format()])
+        self.assertThat(
+            self.celery.tasks[0]['kwargs'],
+            Equals({
+                'ip_address': sip.ip.format(),
+                'server_address': "127.0.0.1",
+                'omapi_key': node.nodegroup.dhcp_key,
+                }))
+        args, kwargs = set_call.call_args
+        self.assertEqual(node.work_queue, kwargs['queue'])
+
+    def test_dynamic_ip_addresses_queries_leases(self):
         node = factory.make_node()
         macs = [factory.make_mac_address(node=node) for i in range(2)]
         leases = [
@@ -414 +641 @@
                 nodegroup=node.nodegroup, mac=mac.mac_address)
             for mac in macs]
         self.assertItemsEqual(
-            [lease.ip for lease in leases], node.ip_addresses())
+            [lease.ip for lease in leases], node.dynamic_ip_addresses())

-    def test_ip_addresses_uses_result_cache(self):
-        # ip_addresses has a specialized code path for the case where the
-        # node group's set of DHCP leases is already cached in Django's ORM.
-        # This test exercises that code path.
+    def test_dynamic_ip_addresses_uses_result_cache(self):
+        # dynamic_ip_addresses has a specialized code path for the case where
+        # the node group's set of DHCP leases is already cached in Django's
+        # ORM.  This test exercises that code path.
         node = factory.make_node()
         macs = [factory.make_mac_address(node=node) for i in range(2)]
         leases = [
@@ -436 +663 @@
         query = Node.objects.filter(id=node.id)
         query = query.prefetch_related('nodegroup__dhcplease_set')
         # The cache is populated.  This is the condition that triggers the
-        # separate code path in Node.ip_addresses().
+        # separate code path in Node.dynamic_ip_addresses().
         self.assertIsNotNone(
             query[0].nodegroup.dhcplease_set.all()._result_cache)

-        # ip_addresses() still returns the node's leased addresses.
-        num_queries, addresses = count_queries(query[0].ip_addresses)
+        # dynamic_ip_addresses() still returns the node's leased addresses.
+        num_queries, addresses = count_queries(query[0].dynamic_ip_addresses)
         # It only takes one query: to get the node's MAC addresses.
         self.assertEqual(1, num_queries)
         # The result is not a query set, so this isn't hiding a further query.
@@ -450 +677 @@
         # We still get exactly the right IP addresses.
         self.assertItemsEqual([lease.ip for lease in leases], addresses)

-    def test_ip_addresses_filters_by_mac_addresses(self):
+    def test_dynamic_ip_addresses_filters_by_mac_addresses(self):
         node = factory.make_node()
         # Another node in the same nodegroup has some IP leases.  The one thing
         # that tells ip_addresses what nodes these leases belong to are their
@@ -462 +689 @@
                 nodegroup=node.nodegroup, mac=mac.mac_address)
         # The other node's leases do not get mistaken for ones that belong to
         # our original node.
-        self.assertItemsEqual([], other_node.ip_addresses())
+        self.assertItemsEqual([], other_node.dynamic_ip_addresses())
+
+    def test_static_ip_addresses_returns_static_ip_addresses(self):
+        node = factory.make_node()
+        [mac2, mac3] = [
+            factory.make_mac_address(node=node) for i in range(2)]
+        ip1 = factory.make_staticipaddress(mac=mac2)
+        ip2 = factory.make_staticipaddress(mac=mac3)
+        # Create another node with a static IP address.
+        other_node = factory.make_node(nodegroup=node.nodegroup, mac=True)
+        factory.make_staticipaddress(mac=other_node.macaddress_set.all()[0])
+        self.assertItemsEqual([ip1.ip, ip2.ip], node.static_ip_addresses())
+
+    def test_static_ip_addresses_uses_result_cache(self):
+        # static_ip_addresses has a specialized code path for the case where
+        # the node's static IPs are already cached in Django's ORM.  This
+        # test exercises that code path.
+        node = factory.make_node()
+        [mac2, mac3] = [
+            factory.make_mac_address(node=node) for i in range(2)]
+        ip1 = factory.make_staticipaddress(mac=mac2)
+        ip2 = factory.make_staticipaddress(mac=mac3)
+
+        # Don't address the node directly; address it through a query with
+        # prefetched static IPs, to ensure that the query cache for those
+        # IP addresses.
+        query = Node.objects.filter(id=node.id)
+        query = query.prefetch_related('macaddress_set__ip_addresses')
+
+        # dynamic_ip_addresses() still returns the node's leased addresses.
+        num_queries, addresses = count_queries(query[0].static_ip_addresses)
+        self.assertEqual(0, num_queries)
+        # The result is not a query set, so this isn't hiding a further query.
+        self.assertIsInstance(addresses, list)
+        # We still get exactly the right IP addresses.
+        self.assertItemsEqual([ip1.ip, ip2.ip], addresses)
+
+    def test_ip_addresses_returns_static_ip_addresses(self):
+        # If both static and dynamic IP addresses are present, the static
+        # addresses take precedence: they are allocated and deallocated in
+        # a synchronous fashion whereas the dynamic addresses are updated
+        # periodically.
+        node = factory.make_node(mac=True)
+        mac = node.macaddress_set.all()[0]
+        # Create a dynamic IP attached to the node.
+        factory.make_dhcp_lease(
+            nodegroup=node.nodegroup, mac=mac.mac_address)
+        # Create a static IP attached to the node.
+        ip = factory.make_staticipaddress(mac=mac)
+        self.assertItemsEqual([ip.ip], node.ip_addresses())
+
+    def test_ip_addresses_returns_dynamic_ip_if_no_static_ip(self):
+        node = factory.make_node(mac=True)
+        lease = factory.make_dhcp_lease(
+            nodegroup=node.nodegroup,
+            mac=node.macaddress_set.all()[0].mac_address)
+        self.assertItemsEqual([lease.ip], node.ip_addresses())
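The two ip_addresses tests just added spell out a precedence rule: static
addresses, which are allocated and released synchronously, win over addresses
learned from DHCP leases. A hypothetical one-line sketch of that contract (not
the actual model method) is:

def ip_addresses_sketch(node):
    # Illustration only: return static addresses when any exist, otherwise
    # fall back to the addresses taken from DHCP leases.
    return node.static_ip_addresses() or node.dynamic_ip_addresses()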
 
     def test_release_turns_on_netboot(self):
         node = factory.make_node(
@@ -471 +754 @@
         node.release()
         self.assertTrue(node.netboot)

-    def test_release_clears_distro_series(self):
+    def test_release_clears_osystem_and_distro_series(self):
         node = factory.make_node(
             status=NODE_STATUS.ALLOCATED, owner=factory.make_user())
-        node.set_distro_series(series=DISTRO_SERIES.quantal)
+        osystem = factory.getRandomOS()
+        release = factory.getRandomRelease(osystem)
+        node.osystem = osystem.name
+        node.distro_series = release
         node.release()
+        self.assertEqual("", node.osystem)
         self.assertEqual("", node.distro_series)

     def test_release_powers_off_node(self):
@@ -490 +777 @@
             (1, 'provisioningserver.tasks.power_off'),
             (len(self.celery.tasks), self.celery.tasks[0]['task'].name))

+    def test_release_deallocates_static_ips(self):
+        deallocate = self.patch(StaticIPAddressManager, 'deallocate_by_node')
+        node = factory.make_node(
+            status=NODE_STATUS.ALLOCATED, owner=factory.make_user(),
+            power_type='ether_wake')
+        node.release()
+        self.assertThat(deallocate, MockCalledOnceWith(node))
+
+    def test_release_updates_dns(self):
+        change_dns_zones = self.patch(dns_module, 'change_dns_zones')
+        nodegroup = factory.make_node_group(
+            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS,
+            status=NODEGROUP_STATUS.ACCEPTED)
+        node = factory.make_node(
+            nodegroup=nodegroup, status=NODE_STATUS.ALLOCATED,
+            owner=factory.make_user(), power_type='ether_wake')
+        node.release()
+        self.assertThat(change_dns_zones, MockCalledOnceWith([node.nodegroup]))
+
     def test_accept_enlistment_gets_node_out_of_declared_state(self):
         # If called on a node in Declared state, accept_enlistment()
         # changes the node's status, and returns the node.
@@ -608 +914 @@
         self.assertEqual(
             data, NodeCommissionResult.objects.get_data(node, filename))

+    def test_abort_commissioning_changes_status_and_stops_node(self):
+        self.patch(PowerAction, 'run_shell').return_value = ('', '')
+        node = factory.make_node(
+            status=NODE_STATUS.COMMISSIONING, power_type='virsh')
+        node.abort_commissioning(factory.make_admin())
+        expected_attrs = {
+            'status': NODE_STATUS.DECLARED,
+        }
+        self.assertAttributes(node, expected_attrs)
+        self.assertEqual(
+            ['provisioningserver.tasks.power_off'],
+            [task['task'].name for task in self.celery.tasks])
+
+    def test_abort_commisssioning_doesnt_stop_nodes_for_non_admin_users(self):
+        node = factory.make_node(
+            status=NODE_STATUS.COMMISSIONING, power_type='virsh')
+        node.abort_commissioning(factory.make_user())
+        expected_attrs = {
+            'status': NODE_STATUS.COMMISSIONING,
+        }
+        self.assertAttributes(node, expected_attrs)
+        self.assertEqual([], self.celery.tasks)
+
+    def test_abort_commisssioning_errors_if_node_is_not_commissioning(self):
+        unaccepted_statuses = set(map_enum(NODE_STATUS).values())
+        unaccepted_statuses.remove(NODE_STATUS.COMMISSIONING)
+        for status in unaccepted_statuses:
+            node = factory.make_node(
+                status=status, power_type='virsh')
+            self.assertRaises(
+                NodeStateViolation, node.abort_commissioning,
+                factory.make_admin())
+
     def test_full_clean_checks_status_transition_and_raises_if_invalid(self):
         # RETIRED -> ALLOCATED is an invalid transition.
         node = factory.make_node(
@@ -769 +1108 @@
         node = factory.make_node(architecture=full_arch)
         self.assertEqual((main_arch, sub_arch), node.split_arch())

+    def test_mac_addresses_on_managed_interfaces_returns_only_managed(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
+
+        mac_with_no_interface = factory.make_mac_address(node=node)
+        unmanaged_interface = factory.make_node_group_interface(
+            nodegroup=node.nodegroup,
+            management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)
+        mac_with_unmanaged_interface = factory.make_mac_address(
+            node=node, cluster_interface=unmanaged_interface)
+        ignore_unused(mac_with_no_interface, mac_with_unmanaged_interface)
+
+        observed = node.mac_addresses_on_managed_interfaces()
+        self.assertItemsEqual([node.get_primary_mac()], observed)
+
+    def test_mac_addresses_on_managed_interfaces_returns_empty_if_none(self):
+        node = factory.make_node(mac=True)
+        observed = node.mac_addresses_on_managed_interfaces()
+        self.assertItemsEqual([], observed)
+

 class NodeRoutersTest(MAASServerTestCase):

@@ -1057 +1416 @@
                 self.celery.tasks[0]['kwargs']['mac_address'],
             ))

+    def test_start_nodes_does_not_claim_static_ip_unless_allocated(self):
+        user = factory.make_user()
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            owner=user, power_type='ether_wake')
+        output = Node.objects.start_nodes([node.system_id], user)
+        # Check that the single node was started, and that only a
+        # power_on task was issued.
+        self.assertItemsEqual([node], output)
+        self.assertEqual(
+            [
+                'provisioningserver.tasks.power_on',
+            ],
+            [
+                task['task'].name for task in self.celery.tasks
+            ])
+
+        # No static IPs should have been allocated.
+        self.assertItemsEqual([], StaticIPAddress.objects.all())
+
+    def test_start_nodes_issues_dhcp_host_task(self):
+        user = factory.make_user()
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            owner=user, power_type='ether_wake', status=NODE_STATUS.ALLOCATED)
+        omshell_create = self.patch(Omshell, 'create')
+        output = Node.objects.start_nodes([node.system_id], user)
+
+        # Check that the single node was started, and that the tasks
+        # issued are all there and in the right order.
+        self.assertItemsEqual([node], output)
+        self.assertEqual(
+            [
+                'provisioningserver.tasks.add_new_dhcp_host_map',
+                'provisioningserver.tasks.power_on',
+            ],
+            [
+                task['task'].name for task in self.celery.tasks
+            ])
+
+        # Also check that Omshell.create() was called with the right
+        # parameters.
+        mac = node.get_primary_mac()
+        [ip] = mac.ip_addresses.all()
+        expected_ip = ip.ip
+        expected_mac = mac.mac_address
+        args, kwargs = omshell_create.call_args
+        self.assertEqual((expected_ip, expected_mac), args)
+
+    def test_start_nodes_clears_existing_dynamic_maps(self):
+        user = factory.make_user()
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            owner=user, power_type='ether_wake', status=NODE_STATUS.ALLOCATED)
+        factory.make_dhcp_lease(
+            nodegroup=node.nodegroup, mac=node.get_primary_mac().mac_address)
+        self.patch(Omshell, 'create')
+        self.patch(Omshell, 'remove')
+        output = Node.objects.start_nodes([node.system_id], user)
+
+        # Check that the single node was started, and that the tasks
+        # issued are all there and in the right order.
+        self.assertItemsEqual([node], output)
+        self.assertEqual(
+            [
+                'provisioningserver.tasks.remove_dhcp_host_map',
+                'provisioningserver.tasks.add_new_dhcp_host_map',
+                'provisioningserver.tasks.power_on',
+            ],
+            [
+                task['task'].name for task in self.celery.tasks
+            ])
+
     def test_start_nodes_task_routed_to_nodegroup_worker(self):
+        # Startup jobs are chained, so the normal way of inspecting a
+        # task directly for routing options doesn't work here, because
+        # in EAGER mode that we use in the test suite, the options are
+        # not passed all the way down to the tasks.  Instead, we patch
+        # some celery code to inspect the options that were passed.
         user = factory.make_user()
         node, mac = self.make_node_with_mac(
             user, power_type='ether_wake')
-        task = self.patch(node_module, 'power_on')
+        option_call = self.patch(celery.canvas.Signature, 'set')
         Node.objects.start_nodes([node.system_id], user)
-        args, kwargs = task.apply_async.call_args
+        args, kwargs = option_call.call_args
         self.assertEqual(node.work_queue, kwargs['queue'])
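The comment added to test_start_nodes_task_routed_to_nodegroup_worker explains
the inspection trick used throughout this revision: the chained startup jobs
run eagerly in the test suite, so routing options never reach the task itself,
and the tests instead patch celery.canvas.Signature.set and read the target
queue out of its call arguments. A minimal standalone illustration of that
mock pattern, independent of MAAS and its test fixtures (the queue name below
is made up and the task name is used only as a label), might look like this:

from unittest import mock

from celery import canvas

with mock.patch.object(canvas.Signature, 'set') as option_call:
    # Build a bare signature by task name and give it routing options, the
    # same kind of call the code under test makes before dispatching.
    sig = canvas.Signature('provisioningserver.tasks.power_on')
    sig.set(queue='cluster-worker-queue')

# The patched Signature.set recorded the routing options that were requested.
args, kwargs = option_call.call_args
assert kwargs == {'queue': 'cluster-worker-queue'}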
 
     def test_start_nodes_does_not_attempt_power_task_if_no_power_type(self):
@@ -1184 +1618 @@
         node = factory.make_node(netboot=True)
         node.set_netboot(False)
         self.assertFalse(node.netboot)
+
+
+class NodeStaticIPClaimingTest(MAASServerTestCase):
+
+    def test_claim_static_ips_ignores_unmanaged_macs(self):
+        node = factory.make_node()
+        factory.make_mac_address(node=node)
+        observed = node.claim_static_ips()
+        self.assertItemsEqual([], observed)
+
+    def test_claim_static_ips_creates_task_for_each_managed_mac(self):
+        nodegroup = factory.make_node_group()
+        node = factory.make_node(nodegroup=nodegroup)
+
+        # Add some MACs attached to managed interfaces.
+        number_of_macs = 2
+        for _ in range(0, number_of_macs):
+            ngi = factory.make_node_group_interface(
+                nodegroup,
+                management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
+            factory.make_mac_address(node=node, cluster_interface=ngi)
+
+        observed = node.claim_static_ips()
+        expected = [
+            'provisioningserver.tasks.add_new_dhcp_host_map'] * number_of_macs
+
+        self.assertEqual(
+            expected,
+            [task.task for task in observed]
+            )
+
+    def test_claim_static_ips_creates_deletion_task(self):
+        # If dhcp leases exist before creating a static IP, the code
+        # should attempt to remove their host maps.
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        factory.make_dhcp_lease(
+            nodegroup=node.nodegroup, mac=node.get_primary_mac().mac_address)
+
+        observed = node.claim_static_ips()
+
+        self.assertEqual(
+            [
+                'celery.chain',
+                'provisioningserver.tasks.add_new_dhcp_host_map',
+            ],
+            [
+                task.task for task in observed
+            ])
+
+        # Probe the chain to make sure it has the deletion task.
+        self.assertEqual(
+            'provisioningserver.tasks.remove_dhcp_host_map',
+            observed[0].tasks[0].task,
+            )
+
+    def test_claim_static_ips_ignores_interface_with_no_static_range(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        ngi = node.get_primary_mac().cluster_interface
+        ngi.static_ip_range_low = None
+        ngi.static_ip_range_high = None
+        ngi.save()
+
+        observed = node.claim_static_ips()
+        self.assertItemsEqual([], observed)
+
+    def test_claim_static_ips_deallocates_if_cant_complete_all_macs(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        self.patch(
+            MACAddress,
+            'claim_static_ip').side_effect = StaticIPAddressExhaustion
+        deallocate_call = self.patch(
+            StaticIPAddressManager, 'deallocate_by_node')
+        self.assertRaises(StaticIPAddressExhaustion, node.claim_static_ips)
+        self.assertThat(deallocate_call, MockCalledOnceWith(node))
+
+    def test_claim_static_ips_does_not_deallocate_if_completes_all_macs(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        deallocate_call = self.patch(
+            StaticIPAddressManager, 'deallocate_by_node')
+        node.claim_static_ips()
+
+        self.assertThat(deallocate_call, MockNotCalled())
+
+    def test_claim_static_ips_updates_dns(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface(
+            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)
+        node.nodegroup.status = NODEGROUP_STATUS.ACCEPTED
+        change_dns_zones = self.patch(dns_module, 'change_dns_zones')
+        node.claim_static_ips()
+        self.assertThat(change_dns_zones, MockCalledOnceWith([node.nodegroup]))
+
+    def test_claim_static_ips_creates_no_tasks_if_existing_IP(self):
+        node = factory.make_node_with_mac_attached_to_nodegroupinterface()
+        primary_mac = node.get_primary_mac()
+        primary_mac.claim_static_ip(alloc_type=IPADDRESS_TYPE.STICKY)
+        ngi = factory.make_node_group_interface(
+            node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
+        second_mac = factory.make_mac_address(node=node, cluster_interface=ngi)
+        observed_tasks = node.claim_static_ips()
+
+        # We expect only a single task for the one MAC which did not
+        # already have any IP.
+        expected = ['provisioningserver.tasks.add_new_dhcp_host_map']
+        self.assertEqual(
+            expected,
+            [task.task for task in observed_tasks]
+            )
+        task_mapping_arg = observed_tasks[0].kwargs["mappings"]
+        [observed_mac] = task_mapping_arg.values()
+        self.assertEqual(second_mac.mac_address.get_raw(), observed_mac)