~ubuntu-branches/ubuntu/vivid/sahara/vivid-proposed

Viewing changes to sahara/tests/unit/plugins/hdp/test_clusterspec_hdp2.py

  • Committer: Package Import Robot
  • Author(s): Thomas Goirand
  • Date: 2014-09-24 16:34:46 UTC
  • Revision ID: package-import@ubuntu.com-20140924163446-8gu3zscu5e3n9lr2
Tags: upstream-2014.2~b3
Import upstream version 2014.2~b3

# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
import pkg_resources as pkg

from sahara.plugins.general import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp.versions.version_2_0_6 import services as s2
from sahara.plugins import provisioning
from sahara.tests.unit import base as sahara_base
import sahara.tests.unit.plugins.hdp.hdp_test_base as base
from sahara.topology import topology_helper as th
from sahara import version


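# Minimal stand-in for the CONF object used by the services and topology
# modules; tests assign an instance to s2.CONF / th.CONF to toggle data
# locality and hypervisor awareness without touching global configuration.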
class TestCONF(object):
    def __init__(self, enable_data_locality, enable_hypervisor_awareness):
        self.enable_data_locality = enable_data_locality
        self.enable_hypervisor_awareness = enable_hypervisor_awareness


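# The class-level patches below replace nova instance lookups with the test
# helper and stub out _get_swift_properties; the stubbed mock is handed to
# every test method as its extra argument ('patched' or 'nova_mock').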
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
            base.get_instance_info)
@mock.patch('sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
            '_get_swift_properties',
            return_value=[])
class ClusterSpecTestForHDP2(sahara_base.SaharaTestCase):
    service_validators = {}

    def setUp(self):
        super(ClusterSpecTestForHDP2, self).setUp()
        self.service_validators['YARN'] = self._assert_yarn
        self.service_validators['HDFS'] = self._assert_hdfs
        self.service_validators['MAPREDUCE2'] = self._assert_mrv2
        self.service_validators['GANGLIA'] = self._assert_ganglia
        self.service_validators['NAGIOS'] = self._assert_nagios
        self.service_validators['AMBARI'] = self._assert_ambari
        self.service_validators['PIG'] = self._assert_pig
        self.service_validators['HIVE'] = self._assert_hive
        self.service_validators['HCATALOG'] = self._assert_hcatalog
        self.service_validators['ZOOKEEPER'] = self._assert_zookeeper
        self.service_validators['WEBHCAT'] = self._assert_webhcat
        self.service_validators['OOZIE'] = self._assert_oozie
        self.service_validators['SQOOP'] = self._assert_sqoop
        self.service_validators['HBASE'] = self._assert_hbase
        self.service_validators['HUE'] = self._assert_hue

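    # Parses the default 2.0.6 cluster template against a two-node-group
    # cluster and spot-checks the resulting services, configurations and
    # per-node-group component lists.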
    def test_parse_default_with_cluster(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "SECONDARY_NAMENODE",
                                  "GANGLIA_SERVER", "GANGLIA_MONITOR",
                                  "NAGIOS_SERVER", "AMBARI_SERVER",
                                  "AMBARI_AGENT", "ZOOKEEPER_SERVER"])
        node_group2 = TestNodeGroup('slave', [server2], ['NODEMANAGER',
                                                         'DATANODE'])
        cluster = base.TestCluster([node_group1, node_group2])

        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [])

        self._assert_services(cluster_config.services)
        self._assert_configurations(cluster_config.configurations)

        node_groups = cluster_config.node_groups
        self.assertEqual(2, len(node_groups))
        self.assertIn('master', node_groups)
        self.assertIn('slave', node_groups)

        master_node_group = node_groups['master']
        self.assertEqual('master', master_node_group.name)
        self.assertEqual(13, len(master_node_group.components))
        self.assertIn('NAMENODE', master_node_group.components)
        self.assertIn('RESOURCEMANAGER', master_node_group.components)
        self.assertIn('HISTORYSERVER', master_node_group.components)
        self.assertIn('SECONDARY_NAMENODE', master_node_group.components)
        self.assertIn('GANGLIA_SERVER', master_node_group.components)
        self.assertIn('GANGLIA_MONITOR', master_node_group.components)
        self.assertIn('NAGIOS_SERVER', master_node_group.components)
        self.assertIn('AMBARI_SERVER', master_node_group.components)
        self.assertIn('AMBARI_AGENT', master_node_group.components)
        self.assertIn('YARN_CLIENT', master_node_group.components)
        self.assertIn('ZOOKEEPER_SERVER', master_node_group.components)

        slave_node_group = node_groups['slave']
        self.assertEqual('slave', slave_node_group.name)
        self.assertIn('NODEMANAGER', slave_node_group.components)

        return cluster_config

    def test_determine_component_hosts(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        server1 = base.TestServer('ambari_machine', 'master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'slave', '11111', 3, '222.22.2222',
                                  '333.22.2222')
        server3 = base.TestServer('host3', 'slave', '11111', 3, '222.22.2223',
                                  '333.22.2223')

        node_group1 = TestNodeGroup(
            'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "SECONDARY_NAMENODE",
                                  "GANGLIA_SERVER", "NAGIOS_SERVER",
                                  "AMBARI_SERVER", "ZOOKEEPER_SERVER"])
        node_group2 = TestNodeGroup(
            'slave', [server2], ["DATANODE", "NODEMANAGER",
                                 "HDFS_CLIENT", "MAPREDUCE2_CLIENT"])

        node_group3 = TestNodeGroup(
            'slave2', [server3], ["DATANODE", "NODEMANAGER",
                                  "HDFS_CLIENT", "MAPREDUCE2_CLIENT"])

        cluster = base.TestCluster([node_group1, node_group2, node_group3])

        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [])

        hosts = cluster_config.determine_component_hosts('AMBARI_SERVER')
        self.assertEqual(1, len(hosts))
        self.assertEqual('ambari_machine', hosts.pop().fqdn())

        hosts = cluster_config.determine_component_hosts('DATANODE')
        self.assertEqual(2, len(hosts))
        datanodes = set([server2.fqdn(), server3.fqdn()])
        host_fqdn = set([hosts.pop().fqdn(), hosts.pop().fqdn()])
        # test intersection is both servers
        self.assertEqual(datanodes, host_fqdn & datanodes)

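    # Builds an eleven-node-group cluster and verifies that host-dependent
    # properties (JobHistory, NameNode, Hive, WebHCat, ZooKeeper and Oozie
    # addresses) plus the mocked swift properties land in the finalized
    # configuration, and that the user-supplied fs.defaultFS wins.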
    def test_finalize_configuration(self, patched):
        patched.return_value = [{'name': 'swift.prop1',
                                'value': 'swift_prop_value'},
                                {'name': 'swift.prop2',
                                'value': 'swift_prop_value2'}]
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        master_host = base.TestServer(
            'master.novalocal', 'master', '11111', 3,
            '111.11.1111', '222.11.1111')

        jt_host = base.TestServer(
            'jt_host.novalocal', 'jt', '11111', 3,
            '111.11.2222', '222.11.2222')

        nn_host = base.TestServer(
            'nn_host.novalocal', 'nn', '11111', 3,
            '111.11.3333', '222.11.3333')

        snn_host = base.TestServer(
            'snn_host.novalocal', 'jt', '11111', 3,
            '111.11.4444', '222.11.4444')

        hive_host = base.TestServer(
            'hive_host.novalocal', 'hive', '11111', 3,
            '111.11.5555', '222.11.5555')

        hive_ms_host = base.TestServer(
            'hive_ms_host.novalocal', 'hive_ms', '11111', 3,
            '111.11.6666', '222.11.6666')

        hive_mysql_host = base.TestServer(
            'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3,
            '111.11.7777', '222.11.7777')

        hcat_host = base.TestServer(
            'hcat_host.novalocal', 'hcat', '11111', 3,
            '111.11.8888', '222.11.8888')

        zk_host = base.TestServer(
            'zk_host.novalocal', 'zk', '11111', 3,
            '111.11.9999', '222.11.9999')

        oozie_host = base.TestServer(
            'oozie_host.novalocal', 'oozie', '11111', 3,
            '111.11.9999', '222.11.9999')

        slave_host = base.TestServer(
            'slave1.novalocal', 'slave', '11111', 3,
            '222.22.6666', '333.22.6666')

        master_ng = TestNodeGroup(
            'master', [master_host], ["GANGLIA_SERVER",
                                      "GANGLIA_MONITOR",
                                      "NAGIOIS_SERVER",
                                      "AMBARI_SERVER",
                                      "AMBARI_AGENT"])

        jt_ng = TestNodeGroup(
            'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR",
                              "HISTORYSERVER", "AMBARI_AGENT"])

        nn_ng = TestNodeGroup(
            'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR",
                              "AMBARI_AGENT"])

        snn_ng = TestNodeGroup(
            'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR",
                                "AMBARI_AGENT"])

        hive_ng = TestNodeGroup(
            'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR",
                                  "AMBARI_AGENT"])

        hive_ms_ng = TestNodeGroup(
            'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR",
                                     "AMBARI_AGENT"])

        hive_mysql_ng = TestNodeGroup(
            'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR",
                                         "AMBARI_AGENT"])

        hcat_ng = TestNodeGroup(
            'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR",
                                  "AMBARI_AGENT"])

        zk_ng = TestNodeGroup(
            'zk', [zk_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
                              "AMBARI_AGENT"])

        oozie_ng = TestNodeGroup(
            'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR",
                                    "AMBARI_AGENT"])
        slave_ng = TestNodeGroup(
            'slave', [slave_host], ["DATANODE", "NODEMANAGER",
                                    "GANGLIA_MONITOR", "HDFS_CLIENT",
                                    "MAPREDUCE2_CLIENT", "OOZIE_CLIENT",
                                    "AMBARI_AGENT"])

        user_input_config = TestUserInputConfig(
            'core-site', 'cluster', 'fs.defaultFS')
        user_input = provisioning.UserInput(
            user_input_config, 'hdfs://nn_dif_host.novalocal:8020')

        cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng,
                                    hive_ms_ng, hive_mysql_ng,
                                    hcat_ng, zk_ng, oozie_ng, slave_ng])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        config = cluster_config.configurations

        # for this value, validating that user inputs override configured
        # values, whether they are processed by runtime or not
        self.assertEqual(config['core-site']['fs.defaultFS'],
                         'hdfs://nn_dif_host.novalocal:8020')

        self.assertEqual(config['mapred-site']
                         ['mapreduce.jobhistory.webapp.address'],
                         'jt_host.novalocal:19888')

        self.assertEqual(config['hdfs-site']['dfs.namenode.http-address'],
                         'nn_host.novalocal:50070')
        self.assertEqual(config['hdfs-site']
                         ['dfs.namenode.secondary.http-address'],
                         'snn_host.novalocal:50090')
        self.assertEqual(config['hdfs-site']['dfs.namenode.https-address'],
                         'nn_host.novalocal:50470')

        self.assertEqual(config['global']['hive_hostname'],
                         'hive_host.novalocal')
        self.assertEqual(config['core-site']['hadoop.proxyuser.hive.hosts'],
                         'hive_host.novalocal')
        self.assertEqual(config['hive-site']
                         ['javax.jdo.option.ConnectionURL'],
                         'jdbc:mysql://hive_mysql_host.novalocal/hive?'
                         'createDatabaseIfNotExist=true')
        self.assertEqual(config['hive-site']['hive.metastore.uris'],
                         'thrift://hive_ms_host.novalocal:9083')
        self.assertTrue(
            'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in
            config['webhcat-site']['templeton.hive.properties'])
        self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.hosts'],
                         'hcat_host.novalocal')
        self.assertEqual(config['webhcat-site']['templeton.zookeeper.hosts'],
                         'zk_host.novalocal:2181')

        self.assertEqual(config['oozie-site']['oozie.base.url'],
                         'http://oozie_host.novalocal:11000/oozie')
        self.assertEqual(config['global']['oozie_hostname'],
                         'oozie_host.novalocal')
        self.assertEqual(config['core-site']['hadoop.proxyuser.oozie.hosts'],
                         'oozie_host.novalocal,222.11.9999,111.11.9999')

        # test swift properties
        self.assertEqual('swift_prop_value',
                         config['core-site']['swift.prop1'])
        self.assertEqual('swift_prop_value2',
                         config['core-site']['swift.prop2'])

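    # Same scenario as above, but with HUE added to the slave node group; the
    # extra assertions cover the webhdfs and proxyuser settings that Hue
    # requires in core-site, webhcat-site and oozie-site.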
    def test_finalize_configuration_with_hue(self, patched):
        patched.return_value = [{'name': 'swift.prop1',
                                'value': 'swift_prop_value'},
                                {'name': 'swift.prop2',
                                'value': 'swift_prop_value2'}]
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        master_host = base.TestServer(
            'master.novalocal', 'master', '11111', 3,
            '111.11.1111', '222.11.1111')

        jt_host = base.TestServer(
            'jt_host.novalocal', 'jt', '11111', 3,
            '111.11.2222', '222.11.2222')

        nn_host = base.TestServer(
            'nn_host.novalocal', 'nn', '11111', 3,
            '111.11.3333', '222.11.3333')

        snn_host = base.TestServer(
            'snn_host.novalocal', 'jt', '11111', 3,
            '111.11.4444', '222.11.4444')

        hive_host = base.TestServer(
            'hive_host.novalocal', 'hive', '11111', 3,
            '111.11.5555', '222.11.5555')

        hive_ms_host = base.TestServer(
            'hive_ms_host.novalocal', 'hive_ms', '11111', 3,
            '111.11.6666', '222.11.6666')

        hive_mysql_host = base.TestServer(
            'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3,
            '111.11.7777', '222.11.7777')

        hcat_host = base.TestServer(
            'hcat_host.novalocal', 'hcat', '11111', 3,
            '111.11.8888', '222.11.8888')

        zk_host = base.TestServer(
            'zk_host.novalocal', 'zk', '11111', 3,
            '111.11.9999', '222.11.9999')

        oozie_host = base.TestServer(
            'oozie_host.novalocal', 'oozie', '11111', 3,
            '111.11.9999', '222.11.9999')

        slave_host = base.TestServer(
            'slave1.novalocal', 'slave', '11111', 3,
            '222.22.6666', '333.22.6666')

        master_ng = TestNodeGroup(
            'master', [master_host], ["GANGLIA_SERVER",
                                      "GANGLIA_MONITOR",
                                      "NAGIOIS_SERVER",
                                      "AMBARI_SERVER",
                                      "AMBARI_AGENT"])

        jt_ng = TestNodeGroup(
            'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR",
                              "HISTORYSERVER", "AMBARI_AGENT"])

        nn_ng = TestNodeGroup(
            'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR",
                              "AMBARI_AGENT"])

        snn_ng = TestNodeGroup(
            'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR",
                                "AMBARI_AGENT"])

        hive_ng = TestNodeGroup(
            'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR",
                                  "AMBARI_AGENT"])

        hive_ms_ng = TestNodeGroup(
            'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR",
                                     "AMBARI_AGENT"])

        hive_mysql_ng = TestNodeGroup(
            'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR",
                                         "AMBARI_AGENT"])

        hcat_ng = TestNodeGroup(
            'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR",
                                  "AMBARI_AGENT"])

        zk_ng = TestNodeGroup(
            'zk', [zk_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
                              "AMBARI_AGENT"])

        oozie_ng = TestNodeGroup(
            'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR",
                                    "AMBARI_AGENT"])
        slave_ng = TestNodeGroup(
            'slave', [slave_host], ["DATANODE", "NODEMANAGER",
                                    "GANGLIA_MONITOR", "HDFS_CLIENT",
                                    "MAPREDUCE2_CLIENT", "OOZIE_CLIENT",
                                    "AMBARI_AGENT", "HUE"])

        user_input_config = TestUserInputConfig(
            'core-site', 'cluster', 'fs.defaultFS')
        user_input = provisioning.UserInput(
            user_input_config, 'hdfs://nn_dif_host.novalocal:8020')

        cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng,
                                    hive_ms_ng, hive_mysql_ng,
                                    hcat_ng, zk_ng, oozie_ng, slave_ng])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        config = cluster_config.configurations

        # for this value, validating that user inputs override configured
        # values, whether they are processed by runtime or not
        self.assertEqual(config['core-site']['fs.defaultFS'],
                         'hdfs://nn_dif_host.novalocal:8020')

        self.assertEqual(config['mapred-site']
                         ['mapreduce.jobhistory.webapp.address'],
                         'jt_host.novalocal:19888')

        self.assertEqual(config['hdfs-site']['dfs.namenode.http-address'],
                         'nn_host.novalocal:50070')
        self.assertEqual(config['hdfs-site']
                         ['dfs.namenode.secondary.http-address'],
                         'snn_host.novalocal:50090')
        self.assertEqual(config['hdfs-site']['dfs.namenode.https-address'],
                         'nn_host.novalocal:50470')
        self.assertEqual(config['hdfs-site']['dfs.support.broken.append'],
                         'true')
        self.assertEqual(config['hdfs-site']['dfs.webhdfs.enabled'],
                         'true')

        self.assertEqual(config['global']['hive_hostname'],
                         'hive_host.novalocal')
        self.assertEqual(config['core-site']['hadoop.proxyuser.hive.hosts'],
                         'hive_host.novalocal')
        self.assertEqual(config['hive-site']
                         ['javax.jdo.option.ConnectionURL'],
                         'jdbc:mysql://hive_mysql_host.novalocal/hive?'
                         'createDatabaseIfNotExist=true')
        self.assertEqual(config['hive-site']['hive.metastore.uris'],
                         'thrift://hive_ms_host.novalocal:9083')
        self.assertTrue(
            'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in
            config['webhcat-site']['templeton.hive.properties'])
        self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.hosts'],
                         '*')
        self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.groups'],
                         '*')
        self.assertEqual(config['core-site']['hadoop.proxyuser.hue.hosts'],
                         '*')
        self.assertEqual(config['core-site']['hadoop.proxyuser.hue.groups'],
                         '*')
        self.assertEqual(config['webhcat-site']['templeton.zookeeper.hosts'],
                         'zk_host.novalocal:2181')
        self.assertEqual(config['webhcat-site']['webhcat.proxyuser.hue.hosts'],
                         '*')
        self.assertEqual(config['webhcat-site']
                         ['webhcat.proxyuser.hue.groups'],
                         '*')

        self.assertEqual(config['oozie-site']['oozie.base.url'],
                         'http://oozie_host.novalocal:11000/oozie')
        self.assertEqual(config['oozie-site']
                         ['oozie.service.ProxyUserService.proxyuser.hue.'
                          'groups'], '*')
        self.assertEqual(config['oozie-site']
                         ['oozie.service.ProxyUserService.proxyuser.hue.'
                          'hosts'], '*')
        self.assertEqual(config['global']['oozie_hostname'],
                         'oozie_host.novalocal')
        self.assertEqual(config['core-site']['hadoop.proxyuser.oozie.hosts'],
                         'oozie_host.novalocal,222.11.9999,111.11.9999')

        # test swift properties
        self.assertEqual('swift_prop_value',
                         config['core-site']['swift.prop1'])
        self.assertEqual('swift_prop_value2',
                         config['core-site']['swift.prop2'])

    def test__determine_deployed_services(self, nova_mock):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        master_host = base.TestServer(
            'master.novalocal', 'master', '11111', 3,
            '111.11.1111', '222.11.1111')

        jt_host = base.TestServer(
            'jt_host.novalocal', 'jt', '11111', 3,
            '111.11.2222', '222.11.2222')

        nn_host = base.TestServer(
            'nn_host.novalocal', 'nn', '11111', 3,
            '111.11.3333', '222.11.3333')

        snn_host = base.TestServer(
            'snn_host.novalocal', 'jt', '11111', 3,
            '111.11.4444', '222.11.4444')

        slave_host = base.TestServer(
            'slave1.novalocal', 'slave', '11111', 3,
            '222.22.6666', '333.22.6666')

        master_ng = TestNodeGroup(
            'master', [master_host],
            ['GANGLIA_SERVER',
             'GANGLIA_MONITOR', 'NAGIOS_SERVER',
             'AMBARI_SERVER', 'AMBARI_AGENT', 'ZOOKEEPER_SERVER'])
        jt_ng = TestNodeGroup('jt', [jt_host], ["RESOURCEMANAGER",
                                                "HISTORYSERVER",
                                                "GANGLIA_MONITOR",
                                                "AMBARI_AGENT"])
        nn_ng = TestNodeGroup('nn', [nn_host], ["NAMENODE",
                              "GANGLIA_MONITOR", "AMBARI_AGENT"])
        snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE",
                               "GANGLIA_MONITOR", "AMBARI_AGENT"])
        slave_ng = TestNodeGroup(
            'slave', [slave_host],
            ["DATANODE", "NODEMANAGER",
             "GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE2_CLIENT",
             "AMBARI_AGENT"])

        cluster = base.TestCluster([master_ng, jt_ng, nn_ng,
                                   snn_ng, slave_ng])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [])
        services = cluster_config.services
        for service in services:
            if service.name in ['YARN', 'HDFS', 'MAPREDUCE2', 'GANGLIA',
                                'AMBARI', 'NAGIOS', 'ZOOKEEPER']:
                self.assertTrue(service.deployed)
            else:
                self.assertFalse(service.deployed)

    def test_ambari_rpm_path(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')
        cluster_spec = cs.ClusterSpec(cluster_config_file, version='2.0.6')

        ambari_config = cluster_spec.configurations['ambari']
        rpm = ambari_config.get('rpm', None)
        self.assertEqual('http://s3.amazonaws.com/'
                         'public-repo-1.hortonworks.com/ambari/centos6/'
                         '1.x/updates/1.6.0/ambari.repo', rpm)

    def test_fs_umask(self, patched):
        s_conf = s2.CONF
        try:
            s2.CONF = TestCONF(False, False)
            cluster_config_file = pkg.resource_string(
                version.version_info.package,
                'plugins/hdp/versions/version_2_0_6/resources/'
                'default-cluster.template')

            server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                      '111.11.1111', '222.11.1111')
            server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                      '222.22.2222', '333.22.2222')

            node_group1 = TestNodeGroup(
                'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                      "SECONDARY_NAMENODE", "GANGLIA_SERVER",
                                      "GANGLIA_MONITOR", "NAGIOS_SERVER",
                                      "AMBARI_SERVER", "AMBARI_AGENT",
                                      "HISTORYSERVER", "ZOOKEEPER_SERVER"])
            node_group2 = TestNodeGroup(
                'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
                                     "GANGLIA_MONITOR"])

            cluster = base.TestCluster([node_group1, node_group2])
            cluster_config = cs.ClusterSpec(cluster_config_file, '2.0.6')
            cluster_config.create_operational_config(cluster, [])
            # core-site
            self.assertEqual(
                '022',
                cluster_config.configurations['hdfs-site']
                ['fs.permissions.umask-mode'])
        finally:
            s2.CONF = s_conf

    def test_parse_default(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')

        self._assert_services(cluster_config.services)
        self._assert_configurations(cluster_config.configurations)

        node_groups = cluster_config.node_groups
        self.assertEqual(2, len(node_groups))
        master_node_group = node_groups['master']
        self.assertEqual('master', master_node_group.name)
        self.assertIsNone(master_node_group.predicate)
        self.assertEqual('1', master_node_group.cardinality)
        self.assertEqual(8, len(master_node_group.components))
        self.assertIn('NAMENODE', master_node_group.components)
        self.assertIn('RESOURCEMANAGER', master_node_group.components)
        self.assertIn('HISTORYSERVER', master_node_group.components)
        self.assertIn('SECONDARY_NAMENODE', master_node_group.components)
        self.assertIn('GANGLIA_SERVER', master_node_group.components)
        self.assertIn('NAGIOS_SERVER', master_node_group.components)
        self.assertIn('AMBARI_SERVER', master_node_group.components)
        self.assertIn('ZOOKEEPER_SERVER', master_node_group.components)

        slave_node_group = node_groups['slave']
        self.assertEqual('slave', slave_node_group.name)
        self.assertIsNone(slave_node_group.predicate)
        self.assertEqual('1+', slave_node_group.cardinality)
        self.assertEqual(5, len(slave_node_group.components))
        self.assertIn('DATANODE', slave_node_group.components)
        self.assertIn('NODEMANAGER', slave_node_group.components)
        self.assertIn('HDFS_CLIENT', slave_node_group.components)
        self.assertIn('YARN_CLIENT', slave_node_group.components)
        self.assertIn('MAPREDUCE2_CLIENT', slave_node_group.components)

        return cluster_config

    def test_ambari_rpm(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')

        self._assert_configurations(cluster_config.configurations)
        ambari_config = cluster_config.configurations['ambari']
        self.assertIsNotNone(ambari_config.get('rpm', None),
                             'no rpm uri found')

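    # normalize() maps the cluster spec back onto the generic provisioning
    # model; the test spot-checks a few of the resulting Config entries and
    # the node group process lists.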
    def test_normalize(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster = cluster_config.normalize()

        configs = cluster.cluster_configs
        contains_dfs_datanode_http_address = False
        contains_staging_dir = False
        contains_mapred_user = False

        for entry in configs:
            config = entry.config
            # assert some random configurations across targets
            if config.name == 'dfs.datanode.http.address':
                contains_dfs_datanode_http_address = True
                self.assertEqual('string', config.type)
                self.assertEqual('0.0.0.0:50075', config.default_value)
                self.assertEqual('HDFS', config.applicable_target)

            if config.name == 'yarn.app.mapreduce.am.staging-dir':
                contains_staging_dir = True
                self.assertEqual('string', config.type)
                self.assertEqual(
                    '/user',
                    config.default_value)
                self.assertEqual('MAPREDUCE2',
                                 config.applicable_target)

            if config.name == 'mapred_user':
                contains_mapred_user = True
                self.assertEqual('string', config.type)
                self.assertEqual('mapred', config.default_value)
                self.assertEqual('MAPREDUCE2', config.applicable_target)

                #            print 'Config: name: {0}, type:{1},
                # default value:{2}, target:{3}, Value:{4}'.format(
                #                config.name, config.type,
                # config.default_value,
                #  config.applicable_target, entry.value)

        self.assertTrue(contains_dfs_datanode_http_address)
        self.assertTrue(contains_staging_dir)
        self.assertTrue(contains_mapred_user)
        node_groups = cluster.node_groups
        self.assertEqual(2, len(node_groups))
        contains_master_group = False
        contains_slave_group = False
        for i in range(2):
            node_group = node_groups[i]
            components = node_group.node_processes
            if node_group.name == "master":
                contains_master_group = True
                self.assertEqual(8, len(components))
                self.assertIn('NAMENODE', components)
                self.assertIn('RESOURCEMANAGER', components)
                self.assertIn('HISTORYSERVER', components)
                self.assertIn('SECONDARY_NAMENODE', components)
                self.assertIn('GANGLIA_SERVER', components)
                self.assertIn('NAGIOS_SERVER', components)
                self.assertIn('AMBARI_SERVER', components)
                self.assertIn('ZOOKEEPER_SERVER', components)
                # TODO(jspeidel): node configs
                # TODO(jspeidel): vm_requirements
            elif node_group.name == 'slave':
                contains_slave_group = True
                self.assertEqual(5, len(components))
                self.assertIn('DATANODE', components)
                self.assertIn('NODEMANAGER', components)
                self.assertIn('HDFS_CLIENT', components)
                self.assertIn('YARN_CLIENT', components)
                self.assertIn('MAPREDUCE2_CLIENT', components)
                # TODO(jspeidel): node configs
                # TODO(jspeidel): vm requirements
            else:
                self.fail('Unexpected node group: {0}'.format(node_group.name))
        self.assertTrue(contains_master_group)
        self.assertTrue(contains_slave_group)

    def test_existing_config_item_in_top_level_within_blueprint(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        user_input_config = TestUserInputConfig(
            'global', 'OOZIE', 'oozie_log_dir')
        user_input = provisioning.UserInput(user_input_config,
                                            '/some/new/path')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "SECONDARY_NAMENODE",
                                  "GANGLIA_SERVER", "GANGLIA_MONITOR",
                                  "NAGIOS_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'slave', [server2], ["NODEMANAGER", "DATANODE",
                                 "AMBARI_AGENT", "GANGLIA_MONITOR"])

        cluster = base.TestCluster([node_group1, node_group2])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        self.assertEqual('/some/new/path', cluster_config.configurations
                         ['global']['oozie_log_dir'])

    def test_new_config_item_in_top_level_within_blueprint(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        user_input_config = TestUserInputConfig(
            'global', 'general', 'new_property')
        user_input = provisioning.UserInput(user_input_config, 'foo')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'master', [server1],
            ["NAMENODE", "RESOURCEMANAGER",
             "HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
             "GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
             "ZOOKEEPER_SERVER", "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
                                 "GANGLIA_MONITOR"])

        cluster = base.TestCluster([node_group1, node_group2])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        self.assertEqual(
            'foo', cluster_config.configurations['global']['new_property'])

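    # Data locality tests: TestCONF instances are swapped into s2.CONF and
    # th.CONF so that topology-related properties are injected into core-site
    # and mapred-site, with and without hypervisor (node group) awareness.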
    def test_topology_configuration_no_hypervisor(self, patched):
        s_conf = s2.CONF
        th_conf = th.CONF
        try:
            s2.CONF = TestCONF(True, False)
            th.CONF = TestCONF(True, False)
            cluster_config_file = pkg.resource_string(
                version.version_info.package,
                'plugins/hdp/versions/version_2_0_6/resources/'
                'default-cluster.template')

            server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                      '111.11.1111', '222.11.1111')
            server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                      '222.22.2222', '333.22.2222')

            node_group1 = TestNodeGroup(
                'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                      "HISTORYSERVER", "SECONDARY_NAMENODE",
                                      "GANGLIA_SERVER", "GANGLIA_MONITOR",
                                      "NAGIOS_SERVER", "AMBARI_SERVER",
                                      "ZOOKEEPER_SERVER", "AMBARI_AGENT"])
            node_group2 = TestNodeGroup(
                'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
                                     "GANGLIA_MONITOR"])

            cluster = base.TestCluster([node_group1, node_group2])
            cluster_config = cs.ClusterSpec(cluster_config_file,
                                            version='2.0.6')
            cluster_config.create_operational_config(cluster, [])
            # core-site
            self.assertEqual(
                'org.apache.hadoop.net.NetworkTopology',
                cluster_config.configurations['core-site']
                ['net.topology.impl'])
            self.assertEqual(
                'true',
                cluster_config.configurations['core-site']
                ['net.topology.nodegroup.aware'])
            self.assertEqual(
                'org.apache.hadoop.hdfs.server.namenode.'
                'BlockPlacementPolicyWithNodeGroup',
                cluster_config.configurations['core-site']
                ['dfs.block.replicator.classname'])
            self.assertEqual(
                'true',
                cluster_config.configurations['core-site']
                ['fs.swift.service.sahara.location-aware'])
            self.assertEqual(
                'org.apache.hadoop.net.ScriptBasedMapping',
                cluster_config.configurations['core-site']
                ['net.topology.node.switch.mapping.impl'])
            self.assertEqual(
                '/etc/hadoop/conf/topology.sh',
                cluster_config.configurations['core-site']
                ['net.topology.script.file.name'])

            # mapred-site
            self.assertEqual(
                'true',
                cluster_config.configurations['mapred-site']
                ['mapred.jobtracker.nodegroup.aware'])
            self.assertEqual(
                '3',
                cluster_config.configurations['mapred-site']
                ['mapred.task.cache.levels'])
            self.assertEqual(
                'org.apache.hadoop.mapred.JobSchedulableWithNodeGroup',
                cluster_config.configurations['mapred-site']
                ['mapred.jobtracker.jobSchedulable'])
        finally:
            s2.CONF = s_conf
            th.CONF = th_conf

    def test_topology_configuration_with_hypervisor(self, patched):
        s_conf = s2.CONF
        try:
            s2.CONF = TestCONF(True, True)
            cluster_config_file = pkg.resource_string(
                version.version_info.package,
                'plugins/hdp/versions/version_2_0_6/resources/'
                'default-cluster.template')

            server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                      '111.11.1111', '222.11.1111')
            server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                      '222.22.2222', '333.22.2222')

            node_group1 = TestNodeGroup(
                'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
                                      "HISTORYSERVER", "SECONDARY_NAMENODE",
                                      "GANGLIA_SERVER", "GANGLIA_MONITOR",
                                      "NAGIOS_SERVER", "AMBARI_SERVER",
                                      "ZOOKEEPER_SERVER", "AMBARI_AGENT"])
            node_group2 = TestNodeGroup(
                'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
                                     "GANGLIA_MONITOR"])

            cluster = base.TestCluster([node_group1, node_group2])
            cluster_config = cs.ClusterSpec(cluster_config_file,
                                            version='2.0.6')
            cluster_config.create_operational_config(cluster, [])
            # core-site
            self.assertEqual(
                'org.apache.hadoop.net.NetworkTopologyWithNodeGroup',
                cluster_config.configurations['core-site']
                ['net.topology.impl'])
        finally:
            s2.CONF = s_conf

    def test_update_ambari_admin_user(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
                                                'ambari.admin.user')
        user_input = provisioning.UserInput(user_input_config, 'new-user')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'master',
            [server1],
            ["NAMENODE",
             "RESOURCEMANAGER",
             "HISTORYSERVER",
             "SECONDARY_NAMENODE",
             "GANGLIA_SERVER",
             "GANGLIA_MONITOR",
             "NAGIOS_SERVER",
             "AMBARI_SERVER",
             "ZOOKEEPER_SERVER",
             "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'slave',
            [server2],
            ["NODEMANAGER",
             "DATANODE",
             "AMBARI_AGENT",
             "GANGLIA_MONITOR"])

        cluster = base.TestCluster([node_group1, node_group2])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        ambari_service = next(service for service in cluster_config.services
                              if service.name == 'AMBARI')
        users = ambari_service.users
        self.assertEqual(1, len(users))
        self.assertEqual('new-user', users[0].name)

    def test_update_ambari_admin_password(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
                                                'ambari.admin.password')
        user_input = provisioning.UserInput(user_input_config, 'new-pwd')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'master',
            [server1],
            ["NAMENODE",
             "RESOURCEMANAGER",
             "HISTORYSERVER",
             "SECONDARY_NAMENODE",
             "GANGLIA_SERVER",
             "GANGLIA_MONITOR",
             "NAGIOS_SERVER",
             "AMBARI_SERVER",
             "ZOOKEEPER_SERVER",
             "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'slave',
            [server2],
            ["NODEMANAGER",
             "DATANODE",
             "AMBARI_AGENT",
             "GANGLIA_MONITOR"])

        cluster = base.TestCluster([node_group1, node_group2])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(cluster, [user_input])
        ambari_service = next(service for service in cluster_config.services
                              if service.name == 'AMBARI')
        users = ambari_service.users
        self.assertEqual(1, len(users))
        self.assertEqual('new-pwd', users[0].password)

    def test_update_ambari_admin_user_and_password(self, patched):
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')

        user_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
                                                     'ambari.admin.user')
        pwd_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
                                                    'ambari.admin.password')
        user_user_input = provisioning.UserInput(user_user_input_config,
                                                 'new-admin_user')
        pwd_user_input = provisioning.UserInput(pwd_user_input_config,
                                                'new-admin_pwd')

        server1 = base.TestServer('host1', 'test-master', '11111', 3,
                                  '111.11.1111', '222.11.1111')
        server2 = base.TestServer('host2', 'test-slave', '11111', 3,
                                  '222.22.2222', '333.22.2222')

        node_group1 = TestNodeGroup(
            'one', [server1], ["NAMENODE", "RESOURCEMANAGER",
                               "HISTORYSERVER", "SECONDARY_NAMENODE",
                               "GANGLIA_SERVER", "GANGLIA_MONITOR",
                               "NAGIOS_SERVER", "AMBARI_SERVER",
                               "ZOOKEEPER_SERVER", "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'two', [server2], ["NODEMANAGER", "DATANODE",
                               "AMBARI_AGENT", "GANGLIA_MONITOR"])

        cluster = base.TestCluster([node_group1, node_group2])
        cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
        cluster_config.create_operational_config(
            cluster, [user_user_input, pwd_user_input])
        ambari_service = next(service for service in cluster_config.services
                              if service.name == 'AMBARI')
        users = ambari_service.users
        self.assertEqual(1, len(users))
        self.assertEqual('new-admin_user', users[0].name)
        self.assertEqual('new-admin_pwd', users[0].password)

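    # The remaining tests exercise validation: each cluster is missing (or
    # duplicates) a required component, and create_operational_config() is
    # expected to raise the corresponding exception.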
1045
    def test_validate_missing_hdfs(self, patched):
 
1046
        server = base.TestServer('host1', 'slave', '11111', 3,
 
1047
                                 '111.11.1111', '222.22.2222')
 
1048
        server2 = base.TestServer('host2', 'master', '11112', 3,
 
1049
                                  '111.11.1112', '222.22.2223')
 
1050
 
 
1051
        node_group = TestNodeGroup(
 
1052
            'slave', [server], ["NODEMANAGER", "MAPREDUCE2_CLIENT",
 
1053
                                "HISTORYSERVER"])
 
1054
 
 
1055
        node_group2 = TestNodeGroup(
 
1056
            'master', [server2], ["RESOURCEMANAGER", "ZOOKEEPER_SERVER"])
 
1057
 
 
1058
        cluster = base.TestCluster([node_group, node_group2])
 
1059
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
 
1060
        # should fail due to missing hdfs service
 
1061
        try:
 
1062
            cluster_config.create_operational_config(cluster, [])
 
1063
            self.fail('Validation should have thrown an exception')
 
1064
        except ex.RequiredServiceMissingException:
 
1065
            # expected
 
1066
            pass
 
1067
 
 
1068
    def test_validate_missing_mr2(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE"])

        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing mr service
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.RequiredServiceMissingException:
            # expected
            pass

    def test_validate_missing_ambari(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["NAMENODE", "RESOURCEMANAGER",
                                "ZOOKEEPER_SERVER"])

        node_group2 = TestNodeGroup(
            'master', [server2], ["DATANODE", "NODEMANAGER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing ambari service
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.RequiredServiceMissingException:
            # expected
            pass

    # TODO(jspeidel): move validate_* to test_services when validate
    # is called independently of clusterspec
    def test_validate_hdfs(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "HDFS_CLIENT", "MAPREDUCE2_CLIENT"], 1)

        node_group2 = TestNodeGroup(
            'master', [server2], ["RESOURCEMANAGER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing NAMENODE
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "ZOOKEEPER_SERVER",
                                  "AMBARI_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 NAMENODE instances
        node_group3 = TestNodeGroup(
            'master2', [server2], ["NAMENODE"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_yarn(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "HDFS_CLIENT", "MAPREDUCE2_CLIENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing RESOURCEMANAGER
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "AMBARI_SERVER", "ZOOKEEPER_SERVER",
                                  "HISTORYSERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 RESOURCEMANAGER instances
        node_group3 = TestNodeGroup(
            'master', [server2], ["RESOURCEMANAGER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

        # should cause validation exception due to 2 NAMENODE instances
        node_group3 = TestNodeGroup(
            'master', [server2], ["NAMENODE"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

        # should fail due to missing NODEMANAGER
        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "HDFS_CLIENT",
                                "MAPREDUCE2_CLIENT"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_hive(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "HIVE_CLIENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing hive_server
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HIVE_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 HIVE_SERVER
        node_group3 = TestNodeGroup(
            'master', [server2], ["HIVE_SERVER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_zk(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "ZOOKEEPER_CLIENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "AMBARI_SERVER", "HISTORYSERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing ZOOKEEPER_SERVER
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "ZOOKEEPER_SERVER",
                                  "AMBARI_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 ZOOKEEPER_SERVER
        node_group3 = TestNodeGroup(
            'master', [server2], ["ZOOKEEPER_SERVER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_oozie(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "OOZIE_CLIENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing OOZIE_SERVER
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "OOZIE_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 OOZIE_SERVER
        node_group3 = TestNodeGroup(
            'master', [server2], ["OOZIE_SERVER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_ganglia(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "GANGLIA_MONITOR"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing GANGLIA_SERVER
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "GANGLIA_SERVER", "AMBARI_SERVER",
                                  "HISTORYSERVER", "ZOOKEEPER_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 GANGLIA_SERVER
        node_group3 = TestNodeGroup(
            'master2', [server2], ["GANGLIA_SERVER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_ambari(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing AMBARI_SERVER
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should cause validation exception due to 2 AMBARI_SERVER
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "AMBARI_SERVER", "ZOOKEEPER_SERVER"])
        node_group3 = TestNodeGroup(
            'master', [server2], ["AMBARI_SERVER"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        try:
            cluster_config.create_operational_config(cluster, [])
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

    def test_validate_hue(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "HUE"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing hive_server, oozie_server and
        # webhcat_server, which are required by hue
        self.assertRaises(ex.RequiredServiceMissingException,
                          cluster_config.create_operational_config,
                          cluster, [])

        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HIVE_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing oozie_server and webhcat_server, which
        # are required by hue
        self.assertRaises(ex.RequiredServiceMissingException,
                          cluster_config.create_operational_config,
                          cluster, [])

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "OOZIE_CLIENT", "HUE"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HIVE_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER",
                                  "OOZIE_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should fail due to missing webhcat_server, which is required by hue
        self.assertRaises(ex.RequiredServiceMissingException,
                          cluster_config.create_operational_config,
                          cluster, [])

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "OOZIE_CLIENT", "HUE"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HIVE_SERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER", "HISTORYSERVER",
                                  "OOZIE_SERVER", "WEBHCAT_SERVER"])
        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # should validate successfully now
        cluster_config.create_operational_config(cluster, [])

        # should have automatically added a HIVE_CLIENT to "slave" node group
        hue_ngs = cluster_config.get_node_groups_containing_component("HUE")
        self.assertEqual(1, len(hue_ngs))
        self.assertIn("HIVE_CLIENT", hue_ngs.pop().components)

        # should cause validation exception due to 2 hue instances
        node_group3 = TestNodeGroup(
            'master', [server2], ["HUE"])
        cluster = base.TestCluster([node_group, node_group2, node_group3])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        self.assertRaises(ex.InvalidComponentCountException,
                          cluster_config.create_operational_config,
                          cluster, [])

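    # NOTE (editorial): the Hue test above walks the dependency chain one
    # missing piece at a time: HUE requires HIVE_SERVER, OOZIE_SERVER (with an
    # OOZIE_CLIENT co-located on the Hue node group) and WEBHCAT_SERVER before
    # validation passes, after which a HIVE_CLIENT is expected to have been
    # added to the Hue node group automatically.
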
    def test_validate_scaling_existing_ng(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # sanity check that original config validates
        cluster_config.create_operational_config(cluster, [])

        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        scaled_groups = {'master': 2}
        # should fail due to 2 RESOURCEMANAGER instances
        try:
            cluster_config.create_operational_config(
                cluster, [], scaled_groups)
            self.fail('Validation should have thrown an exception')
        except ex.InvalidComponentCountException:
            # expected
            pass

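    # NOTE (editorial): scaled_groups maps a node group name to its requested
    # instance count, e.g. {'master': 2} above; scaling the 'master' group
    # would duplicate its singleton components (NAMENODE, RESOURCEMANAGER and
    # so on), which is why validation is expected to fail.
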
    def test_scale(self, patched):

        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER",
                                "AMBARI_AGENT"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "ZOOKEEPER_SERVER",
                                  "AMBARI_SERVER"])

        cluster = base.TestCluster([node_group, node_group2])

        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # sanity check that original config validates
        cluster_config.create_operational_config(cluster, [])

        slave_ng = cluster_config.node_groups['slave']
        self.assertEqual(1, slave_ng.count)

        cluster_config.scale({'slave': 2})

        self.assertEqual(2, slave_ng.count)

    def test_get_deployed_configurations(self, patched):

        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        node_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER"])
        node_group2 = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "AMBARI_SERVER", "ZOOKEEPER_SERVER",
                                  "HISTORYSERVER"])

        cluster = base.TestCluster([node_group, node_group2])

        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        # sanity check that original config validates
        cluster_config.create_operational_config(cluster, [])
        configs = cluster_config.get_deployed_configurations()
        expected_configs = set(['mapred-site', 'ambari', 'hdfs-site',
                                'global', 'core-site', 'yarn-site'])
        self.assertEqual(expected_configs, expected_configs & configs)

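    # NOTE (editorial): asserting "expected == expected & actual" is simply a
    # subset check; an equivalent sketch would be
    # self.assertTrue(expected_configs.issubset(configs)). The same idiom is
    # reused in test_get_components_for_type below.
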
    def test_get_deployed_node_group_count(self, patched):

        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        slave_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER"])
        slave2_group = TestNodeGroup(
            'slave2', [server], ["DATANODE", "NODEMANAGER"])
        master_group = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([master_group, slave_group, slave2_group])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        cluster_config.create_operational_config(cluster, [])

        self.assertEqual(2, cluster_config.get_deployed_node_group_count(
            'DATANODE'))
        self.assertEqual(1, cluster_config.get_deployed_node_group_count(
            'AMBARI_SERVER'))

    def test_get_node_groups_containing_component(self, patched):
        server = base.TestServer('host1', 'slave', '11111', 3,
                                 '111.11.1111', '222.22.2222')
        server2 = base.TestServer('host2', 'master', '11112', 3,
                                  '111.11.1112', '222.22.2223')

        slave_group = TestNodeGroup(
            'slave', [server], ["DATANODE", "NODEMANAGER"])
        slave2_group = TestNodeGroup(
            'slave2', [server], ["DATANODE", "NODEMANAGER"])
        master_group = TestNodeGroup(
            'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
                                  "HISTORYSERVER", "AMBARI_SERVER",
                                  "ZOOKEEPER_SERVER"])

        cluster = base.TestCluster([master_group, slave_group, slave2_group])
        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        cluster_config.create_operational_config(cluster, [])

        datanode_ngs = cluster_config.get_node_groups_containing_component(
            'DATANODE')
        self.assertEqual(2, len(datanode_ngs))
        ng_names = set([datanode_ngs[0].name, datanode_ngs[1].name])
        self.assertIn('slave', ng_names)
        self.assertIn('slave2', ng_names)

    def test_get_components_for_type(self, patched):

        cluster_config = base.create_clusterspec(hdp_version='2.0.6')
        clients = cluster_config.get_components_for_type('CLIENT')
        slaves = cluster_config.get_components_for_type('SLAVE')
        masters = cluster_config.get_components_for_type('MASTER')

        expected_clients = set(['HCAT', 'ZOOKEEPER_CLIENT',
                                'MAPREDUCE2_CLIENT', 'HIVE_CLIENT',
                                'HDFS_CLIENT', 'PIG', 'YARN_CLIENT', 'HUE'])
        self.assertEqual(expected_clients, expected_clients & set(clients))

        expected_slaves = set(['AMBARI_AGENT', 'NODEMANAGER', 'DATANODE',
                               'GANGLIA_MONITOR'])
        self.assertEqual(expected_slaves, expected_slaves & set(slaves))

        expected_masters = set(['SECONDARY_NAMENODE', 'HIVE_METASTORE',
                                'AMBARI_SERVER', 'RESOURCEMANAGER',
                                'WEBHCAT_SERVER', 'NAGIOS_SERVER',
                                'MYSQL_SERVER', 'ZOOKEEPER_SERVER',
                                'NAMENODE', 'HIVE_SERVER', 'GANGLIA_SERVER'])
        self.assertEqual(expected_masters, expected_masters & set(masters))

    def _assert_services(self, services):
        found_services = []
        for service in services:
            name = service.name
            found_services.append(name)
            self.service_validators[name](service)

        self.assertEqual(15, len(found_services))
        self.assertIn('HDFS', found_services)
        self.assertIn('MAPREDUCE2', found_services)
        self.assertIn('GANGLIA', found_services)
        self.assertIn('NAGIOS', found_services)
        self.assertIn('AMBARI', found_services)
        self.assertIn('PIG', found_services)
        self.assertIn('HIVE', found_services)
        self.assertIn('HCATALOG', found_services)
        self.assertIn('ZOOKEEPER', found_services)
        self.assertIn('WEBHCAT', found_services)
        self.assertIn('OOZIE', found_services)
        self.assertIn('SQOOP', found_services)
        self.assertIn('HBASE', found_services)
        self.assertIn('HUE', found_services)

    def _assert_hdfs(self, service):
        self.assertEqual('HDFS', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(4, len(found_components))
        self._assert_component('NAMENODE', 'MASTER', "1",
                               found_components['NAMENODE'])
        self._assert_component('DATANODE', 'SLAVE', "1+",
                               found_components['DATANODE'])
        self._assert_component('SECONDARY_NAMENODE', 'MASTER', "1",
                               found_components['SECONDARY_NAMENODE'])
        self._assert_component('HDFS_CLIENT', 'CLIENT', "1+",
                               found_components['HDFS_CLIENT'])
        # TODO(jspeidel) config

    def _assert_mrv2(self, service):
        self.assertEqual('MAPREDUCE2', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(2, len(found_components))
        self._assert_component('HISTORYSERVER', 'MASTER', "1",
                               found_components['HISTORYSERVER'])
        self._assert_component('MAPREDUCE2_CLIENT', 'CLIENT', "1+",
                               found_components['MAPREDUCE2_CLIENT'])

    def _assert_yarn(self, service):
        self.assertEqual('YARN', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(3, len(found_components))
        self._assert_component('RESOURCEMANAGER', 'MASTER', "1",
                               found_components['RESOURCEMANAGER'])
        self._assert_component('NODEMANAGER', 'SLAVE', "1+",
                               found_components['NODEMANAGER'])
        self._assert_component('YARN_CLIENT', 'CLIENT', "1+",
                               found_components['YARN_CLIENT'])

    def _assert_nagios(self, service):
        self.assertEqual('NAGIOS', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(1, len(found_components))
        self._assert_component('NAGIOS_SERVER', 'MASTER', "1",
                               found_components['NAGIOS_SERVER'])

    def _assert_ganglia(self, service):
        self.assertEqual('GANGLIA', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(2, len(found_components))
        self._assert_component('GANGLIA_SERVER', 'MASTER', "1",
                               found_components['GANGLIA_SERVER'])
        self._assert_component('GANGLIA_MONITOR', 'SLAVE', "1+",
                               found_components['GANGLIA_MONITOR'])

    def _assert_ambari(self, service):
        self.assertEqual('AMBARI', service.name)

        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(2, len(found_components))
        self._assert_component('AMBARI_SERVER', 'MASTER', "1",
                               found_components['AMBARI_SERVER'])
        self._assert_component('AMBARI_AGENT', 'SLAVE', "1+",
                               found_components['AMBARI_AGENT'])

        self.assertEqual(1, len(service.users))
        user = service.users[0]
        self.assertEqual('admin', user.name)
        self.assertEqual('admin', user.password)
        groups = user.groups
        self.assertEqual(1, len(groups))
        self.assertIn('admin', groups)

    def _assert_pig(self, service):
        self.assertEqual('PIG', service.name)
        self.assertEqual(1, len(service.components))
        self.assertEqual('PIG', service.components[0].name)

    def _assert_hive(self, service):
        self.assertEqual('HIVE', service.name)
        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(4, len(found_components))
        self._assert_component('HIVE_SERVER', 'MASTER', "1",
                               found_components['HIVE_SERVER'])
        self._assert_component('HIVE_METASTORE', 'MASTER', "1",
                               found_components['HIVE_METASTORE'])
        self._assert_component('MYSQL_SERVER', 'MASTER', "1",
                               found_components['MYSQL_SERVER'])
        self._assert_component('HIVE_CLIENT', 'CLIENT', "1+",
                               found_components['HIVE_CLIENT'])

    def _assert_hcatalog(self, service):
        self.assertEqual('HCATALOG', service.name)
        self.assertEqual(1, len(service.components))
        self.assertEqual('HCAT', service.components[0].name)

    def _assert_zookeeper(self, service):
        self.assertEqual('ZOOKEEPER', service.name)
        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(2, len(found_components))
        self._assert_component('ZOOKEEPER_SERVER', 'MASTER', "1",
                               found_components['ZOOKEEPER_SERVER'])
        self._assert_component('ZOOKEEPER_CLIENT', 'CLIENT', "1+",
                               found_components['ZOOKEEPER_CLIENT'])

    def _assert_webhcat(self, service):
        self.assertEqual('WEBHCAT', service.name)
        self.assertEqual(1, len(service.components))
        self.assertEqual('WEBHCAT_SERVER', service.components[0].name)

    def _assert_oozie(self, service):
        self.assertEqual('OOZIE', service.name)
        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(2, len(found_components))
        self._assert_component('OOZIE_SERVER', 'MASTER', "1",
                               found_components['OOZIE_SERVER'])
        self._assert_component('OOZIE_CLIENT', 'CLIENT', "1+",
                               found_components['OOZIE_CLIENT'])

    def _assert_sqoop(self, service):
        self.assertEqual('SQOOP', service.name)
        self.assertEqual(1, len(service.components))
        self.assertEqual('SQOOP', service.components[0].name)

    def _assert_hbase(self, service):
        self.assertEqual('HBASE', service.name)
        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(3, len(found_components))
        self._assert_component('HBASE_MASTER', 'MASTER', "1",
                               found_components['HBASE_MASTER'])
        self._assert_component('HBASE_REGIONSERVER', 'SLAVE', "1+",
                               found_components['HBASE_REGIONSERVER'])
        self._assert_component('HBASE_CLIENT', 'CLIENT', "1+",
                               found_components['HBASE_CLIENT'])

    def _assert_hue(self, service):
        self.assertEqual('HUE', service.name)
        found_components = {}
        for component in service.components:
            found_components[component.name] = component

        self.assertEqual(1, len(found_components))
        self._assert_component('HUE', 'CLIENT', "1",
                               found_components['HUE'])

    def _assert_component(self, name, comp_type, cardinality, component):
        self.assertEqual(name, component.name)
        self.assertEqual(comp_type, component.type)
        self.assertEqual(cardinality, component.cardinality)

    def _assert_configurations(self, configurations):
        self.assertEqual(16, len(configurations))
        self.assertIn('global', configurations)
        self.assertIn('core-site', configurations)
        self.assertIn('yarn-site', configurations)
        self.assertIn('mapred-site', configurations)
        self.assertIn('hdfs-site', configurations)
        self.assertIn('ambari', configurations)
        self.assertIn('webhcat-site', configurations)
        self.assertIn('hive-site', configurations)
        self.assertIn('oozie-site', configurations)
        self.assertIn('hbase-site', configurations)
        self.assertIn('capacity-scheduler', configurations)
        self.assertIn('hue-ini', configurations)
        self.assertIn('hue-core-site', configurations)
        self.assertIn('hue-hdfs-site', configurations)
        self.assertIn('hue-webhcat-site', configurations)
        self.assertIn('hue-oozie-site', configurations)


class TestNodeGroup:
    def __init__(self, name, instances, node_processes, count=1):
        self.name = name
        self.instances = instances
        for i in instances:
            i.node_group = self
        self.node_processes = node_processes
        self.count = count
        self.id = name

    def storage_paths(self):
        return ['']


class TestUserInputConfig:
    def __init__(self, tag, target, name):
        self.tag = tag
        self.applicable_target = target
        self.name = name
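
# NOTE (editorial): TestUserInputConfig is a minimal stand-in for a plugin
# config definition; in the tests above it is wrapped in a
# provisioning.UserInput to override values such as the Ambari admin
# credentials. An illustrative sketch only (the tag/target/name strings here
# are examples, not necessarily the exact ones used earlier in this file):
#
#     user_config = TestUserInputConfig('ambari-stack', 'AMBARI',
#                                       'ambari.admin.user')
#     user_user_input = provisioning.UserInput(user_config, 'new-admin_user')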