# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pkg_resources as pkg

from sahara.plugins.general import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp.versions.version_2_0_6 import services as s2
from sahara.plugins import provisioning
from sahara.tests.unit import base as sahara_base
import sahara.tests.unit.plugins.hdp.hdp_test_base as base
from sahara.topology import topology_helper as th
from sahara import version
29
class TestCONF(object):
    """Minimal stand-in for sahara's global CONF object.

    Exposes only the two topology-related flags that the HDP plugin
    services and the topology helper consult in these tests.
    """

    def __init__(self, enable_data_locality, enable_hypervisor_awareness):
        # Flags are stored verbatim; tests swap this object in for
        # s2.CONF / th.CONF to simulate deployment configuration.
        self.enable_data_locality = enable_data_locality
        self.enable_hypervisor_awareness = enable_hypervisor_awareness
35
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
36
base.get_instance_info)
37
@mock.patch('sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
38
'_get_swift_properties',
40
class ClusterSpecTestForHDP2(sahara_base.SaharaTestCase):
41
service_validators = {}

def setUp(self):
    """Register the per-service assertion helpers used by the tests.

    NOTE(review): the ``def setUp(self):`` line was lost in the mangled
    source (the ``super().setUp()`` call makes its presence certain) and
    has been restored here.
    """
    super(ClusterSpecTestForHDP2, self).setUp()
    # Bug fix: the original mutated the shared class-level dict through
    # ``self.service_validators[...] = ...``; rebinding a fresh instance
    # dict avoids cross-instance state while keeping the same lookups.
    self.service_validators = {
        'YARN': self._assert_yarn,
        'HDFS': self._assert_hdfs,
        'MAPREDUCE2': self._assert_mrv2,
        'GANGLIA': self._assert_ganglia,
        'NAGIOS': self._assert_nagios,
        'AMBARI': self._assert_ambari,
        'PIG': self._assert_pig,
        'HIVE': self._assert_hive,
        'HCATALOG': self._assert_hcatalog,
        'ZOOKEEPER': self._assert_zookeeper,
        'WEBHCAT': self._assert_webhcat,
        'OOZIE': self._assert_oozie,
        'SQOOP': self._assert_sqoop,
        'HBASE': self._assert_hbase,
        'HUE': self._assert_hue,
    }
61
def test_parse_default_with_cluster(self, patched):
62
cluster_config_file = pkg.resource_string(
63
version.version_info.package,
64
'plugins/hdp/versions/version_2_0_6/resources/'
65
'default-cluster.template')
67
server1 = base.TestServer('host1', 'test-master', '11111', 3,
68
'111.11.1111', '222.11.1111')
69
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
70
'222.22.2222', '333.22.2222')
72
node_group1 = TestNodeGroup(
73
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
74
"HISTORYSERVER", "SECONDARY_NAMENODE",
75
"GANGLIA_SERVER", "GANGLIA_MONITOR",
76
"NAGIOS_SERVER", "AMBARI_SERVER",
77
"AMBARI_AGENT", "ZOOKEEPER_SERVER"])
78
node_group2 = TestNodeGroup('slave', [server2], ['NODEMANAGER',
80
cluster = base.TestCluster([node_group1, node_group2])
82
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
83
cluster_config.create_operational_config(cluster, [])
85
self._assert_services(cluster_config.services)
86
self._assert_configurations(cluster_config.configurations)
88
node_groups = cluster_config.node_groups
89
self.assertEqual(2, len(node_groups))
90
self.assertIn('master', node_groups)
91
self.assertIn('slave', node_groups)
93
master_node_group = node_groups['master']
94
self.assertEqual('master', master_node_group.name)
95
self.assertEqual(13, len(master_node_group.components))
96
self.assertIn('NAMENODE', master_node_group.components)
97
self.assertIn('RESOURCEMANAGER', master_node_group.components)
98
self.assertIn('HISTORYSERVER', master_node_group.components)
99
self.assertIn('SECONDARY_NAMENODE', master_node_group.components)
100
self.assertIn('GANGLIA_SERVER', master_node_group.components)
101
self.assertIn('GANGLIA_MONITOR', master_node_group.components)
102
self.assertIn('NAGIOS_SERVER', master_node_group.components)
103
self.assertIn('AMBARI_SERVER', master_node_group.components)
104
self.assertIn('AMBARI_AGENT', master_node_group.components)
105
self.assertIn('YARN_CLIENT', master_node_group.components)
106
self.assertIn('ZOOKEEPER_SERVER', master_node_group.components)
108
slave_node_group = node_groups['slave']
109
self.assertEqual('slave', slave_node_group.name)
110
self.assertIn('NODEMANAGER', slave_node_group.components)
112
return cluster_config
114
def test_determine_component_hosts(self, patched):
115
cluster_config_file = pkg.resource_string(
116
version.version_info.package,
117
'plugins/hdp/versions/version_2_0_6/resources/'
118
'default-cluster.template')
120
server1 = base.TestServer('ambari_machine', 'master', '11111', 3,
121
'111.11.1111', '222.11.1111')
122
server2 = base.TestServer('host2', 'slave', '11111', 3, '222.22.2222',
124
server3 = base.TestServer('host3', 'slave', '11111', 3, '222.22.2223',
127
node_group1 = TestNodeGroup(
128
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
129
"HISTORYSERVER", "SECONDARY_NAMENODE",
130
"GANGLIA_SERVER", "NAGIOS_SERVER",
131
"AMBARI_SERVER", "ZOOKEEPER_SERVER"])
132
node_group2 = TestNodeGroup(
133
'slave', [server2], ["DATANODE", "NODEMANAGER",
134
"HDFS_CLIENT", "MAPREDUCE2_CLIENT"])
136
node_group3 = TestNodeGroup(
137
'slave2', [server3], ["DATANODE", "NODEMANAGER",
138
"HDFS_CLIENT", "MAPREDUCE2_CLIENT"])
140
cluster = base.TestCluster([node_group1, node_group2, node_group3])
142
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
143
cluster_config.create_operational_config(cluster, [])
145
hosts = cluster_config.determine_component_hosts('AMBARI_SERVER')
146
self.assertEqual(1, len(hosts))
147
self.assertEqual('ambari_machine', hosts.pop().fqdn())
149
hosts = cluster_config.determine_component_hosts('DATANODE')
150
self.assertEqual(2, len(hosts))
151
datanodes = set([server2.fqdn(), server3.fqdn()])
152
host_fqdn = set([hosts.pop().fqdn(), hosts.pop().fqdn()])
153
# test intersection is both servers
154
self.assertEqual(datanodes, host_fqdn & datanodes)
156
def test_finalize_configuration(self, patched):
157
patched.return_value = [{'name': 'swift.prop1',
158
'value': 'swift_prop_value'},
159
{'name': 'swift.prop2',
160
'value': 'swift_prop_value2'}]
161
cluster_config_file = pkg.resource_string(
162
version.version_info.package,
163
'plugins/hdp/versions/version_2_0_6/resources/'
164
'default-cluster.template')
166
master_host = base.TestServer(
167
'master.novalocal', 'master', '11111', 3,
168
'111.11.1111', '222.11.1111')
170
jt_host = base.TestServer(
171
'jt_host.novalocal', 'jt', '11111', 3,
172
'111.11.2222', '222.11.2222')
174
nn_host = base.TestServer(
175
'nn_host.novalocal', 'nn', '11111', 3,
176
'111.11.3333', '222.11.3333')
178
snn_host = base.TestServer(
179
'snn_host.novalocal', 'jt', '11111', 3,
180
'111.11.4444', '222.11.4444')
182
hive_host = base.TestServer(
183
'hive_host.novalocal', 'hive', '11111', 3,
184
'111.11.5555', '222.11.5555')
186
hive_ms_host = base.TestServer(
187
'hive_ms_host.novalocal', 'hive_ms', '11111', 3,
188
'111.11.6666', '222.11.6666')
190
hive_mysql_host = base.TestServer(
191
'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3,
192
'111.11.7777', '222.11.7777')
194
hcat_host = base.TestServer(
195
'hcat_host.novalocal', 'hcat', '11111', 3,
196
'111.11.8888', '222.11.8888')
198
zk_host = base.TestServer(
199
'zk_host.novalocal', 'zk', '11111', 3,
200
'111.11.9999', '222.11.9999')
202
oozie_host = base.TestServer(
203
'oozie_host.novalocal', 'oozie', '11111', 3,
204
'111.11.9999', '222.11.9999')
206
slave_host = base.TestServer(
207
'slave1.novalocal', 'slave', '11111', 3,
208
'222.22.6666', '333.22.6666')
210
master_ng = TestNodeGroup(
211
'master', [master_host], ["GANGLIA_SERVER",
217
jt_ng = TestNodeGroup(
218
'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR",
219
"HISTORYSERVER", "AMBARI_AGENT"])
221
nn_ng = TestNodeGroup(
222
'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR",
225
snn_ng = TestNodeGroup(
226
'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR",
229
hive_ng = TestNodeGroup(
230
'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR",
233
hive_ms_ng = TestNodeGroup(
234
'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR",
237
hive_mysql_ng = TestNodeGroup(
238
'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR",
241
hcat_ng = TestNodeGroup(
242
'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR",
245
zk_ng = TestNodeGroup(
246
'zk', [zk_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
249
oozie_ng = TestNodeGroup(
250
'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR",
252
slave_ng = TestNodeGroup(
253
'slave', [slave_host], ["DATANODE", "NODEMANAGER",
254
"GANGLIA_MONITOR", "HDFS_CLIENT",
255
"MAPREDUCE2_CLIENT", "OOZIE_CLIENT",
258
user_input_config = TestUserInputConfig(
259
'core-site', 'cluster', 'fs.defaultFS')
260
user_input = provisioning.UserInput(
261
user_input_config, 'hdfs://nn_dif_host.novalocal:8020')
263
cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng,
264
hive_ms_ng, hive_mysql_ng,
265
hcat_ng, zk_ng, oozie_ng, slave_ng])
266
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
267
cluster_config.create_operational_config(cluster, [user_input])
268
config = cluster_config.configurations
270
# for this value, validating that user inputs override configured
271
# values, whether they are processed by runtime or not
272
self.assertEqual(config['core-site']['fs.defaultFS'],
273
'hdfs://nn_dif_host.novalocal:8020')
275
self.assertEqual(config['mapred-site']
276
['mapreduce.jobhistory.webapp.address'],
277
'jt_host.novalocal:19888')
279
self.assertEqual(config['hdfs-site']['dfs.namenode.http-address'],
280
'nn_host.novalocal:50070')
281
self.assertEqual(config['hdfs-site']
282
['dfs.namenode.secondary.http-address'],
283
'snn_host.novalocal:50090')
284
self.assertEqual(config['hdfs-site']['dfs.namenode.https-address'],
285
'nn_host.novalocal:50470')
287
self.assertEqual(config['global']['hive_hostname'],
288
'hive_host.novalocal')
289
self.assertEqual(config['core-site']['hadoop.proxyuser.hive.hosts'],
290
'hive_host.novalocal')
291
self.assertEqual(config['hive-site']
292
['javax.jdo.option.ConnectionURL'],
293
'jdbc:mysql://hive_mysql_host.novalocal/hive?'
294
'createDatabaseIfNotExist=true')
295
self.assertEqual(config['hive-site']['hive.metastore.uris'],
296
'thrift://hive_ms_host.novalocal:9083')
298
'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in
299
config['webhcat-site']['templeton.hive.properties'])
300
self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.hosts'],
301
'hcat_host.novalocal')
302
self.assertEqual(config['webhcat-site']['templeton.zookeeper.hosts'],
303
'zk_host.novalocal:2181')
305
self.assertEqual(config['oozie-site']['oozie.base.url'],
306
'http://oozie_host.novalocal:11000/oozie')
307
self.assertEqual(config['global']['oozie_hostname'],
308
'oozie_host.novalocal')
309
self.assertEqual(config['core-site']['hadoop.proxyuser.oozie.hosts'],
310
'oozie_host.novalocal,222.11.9999,111.11.9999')
312
# test swift properties
313
self.assertEqual('swift_prop_value',
314
config['core-site']['swift.prop1'])
315
self.assertEqual('swift_prop_value2',
316
config['core-site']['swift.prop2'])
318
def test_finalize_configuration_with_hue(self, patched):
319
patched.return_value = [{'name': 'swift.prop1',
320
'value': 'swift_prop_value'},
321
{'name': 'swift.prop2',
322
'value': 'swift_prop_value2'}]
323
cluster_config_file = pkg.resource_string(
324
version.version_info.package,
325
'plugins/hdp/versions/version_2_0_6/resources/'
326
'default-cluster.template')
328
master_host = base.TestServer(
329
'master.novalocal', 'master', '11111', 3,
330
'111.11.1111', '222.11.1111')
332
jt_host = base.TestServer(
333
'jt_host.novalocal', 'jt', '11111', 3,
334
'111.11.2222', '222.11.2222')
336
nn_host = base.TestServer(
337
'nn_host.novalocal', 'nn', '11111', 3,
338
'111.11.3333', '222.11.3333')
340
snn_host = base.TestServer(
341
'snn_host.novalocal', 'jt', '11111', 3,
342
'111.11.4444', '222.11.4444')
344
hive_host = base.TestServer(
345
'hive_host.novalocal', 'hive', '11111', 3,
346
'111.11.5555', '222.11.5555')
348
hive_ms_host = base.TestServer(
349
'hive_ms_host.novalocal', 'hive_ms', '11111', 3,
350
'111.11.6666', '222.11.6666')
352
hive_mysql_host = base.TestServer(
353
'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3,
354
'111.11.7777', '222.11.7777')
356
hcat_host = base.TestServer(
357
'hcat_host.novalocal', 'hcat', '11111', 3,
358
'111.11.8888', '222.11.8888')
360
zk_host = base.TestServer(
361
'zk_host.novalocal', 'zk', '11111', 3,
362
'111.11.9999', '222.11.9999')
364
oozie_host = base.TestServer(
365
'oozie_host.novalocal', 'oozie', '11111', 3,
366
'111.11.9999', '222.11.9999')
368
slave_host = base.TestServer(
369
'slave1.novalocal', 'slave', '11111', 3,
370
'222.22.6666', '333.22.6666')
372
master_ng = TestNodeGroup(
373
'master', [master_host], ["GANGLIA_SERVER",
379
jt_ng = TestNodeGroup(
380
'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR",
381
"HISTORYSERVER", "AMBARI_AGENT"])
383
nn_ng = TestNodeGroup(
384
'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR",
387
snn_ng = TestNodeGroup(
388
'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR",
391
hive_ng = TestNodeGroup(
392
'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR",
395
hive_ms_ng = TestNodeGroup(
396
'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR",
399
hive_mysql_ng = TestNodeGroup(
400
'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR",
403
hcat_ng = TestNodeGroup(
404
'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR",
407
zk_ng = TestNodeGroup(
408
'zk', [zk_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
411
oozie_ng = TestNodeGroup(
412
'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR",
414
slave_ng = TestNodeGroup(
415
'slave', [slave_host], ["DATANODE", "NODEMANAGER",
416
"GANGLIA_MONITOR", "HDFS_CLIENT",
417
"MAPREDUCE2_CLIENT", "OOZIE_CLIENT",
418
"AMBARI_AGENT", "HUE"])
420
user_input_config = TestUserInputConfig(
421
'core-site', 'cluster', 'fs.defaultFS')
422
user_input = provisioning.UserInput(
423
user_input_config, 'hdfs://nn_dif_host.novalocal:8020')
425
cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng,
426
hive_ms_ng, hive_mysql_ng,
427
hcat_ng, zk_ng, oozie_ng, slave_ng])
428
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
429
cluster_config.create_operational_config(cluster, [user_input])
430
config = cluster_config.configurations
432
# for this value, validating that user inputs override configured
433
# values, whether they are processed by runtime or not
434
self.assertEqual(config['core-site']['fs.defaultFS'],
435
'hdfs://nn_dif_host.novalocal:8020')
437
self.assertEqual(config['mapred-site']
438
['mapreduce.jobhistory.webapp.address'],
439
'jt_host.novalocal:19888')
441
self.assertEqual(config['hdfs-site']['dfs.namenode.http-address'],
442
'nn_host.novalocal:50070')
443
self.assertEqual(config['hdfs-site']
444
['dfs.namenode.secondary.http-address'],
445
'snn_host.novalocal:50090')
446
self.assertEqual(config['hdfs-site']['dfs.namenode.https-address'],
447
'nn_host.novalocal:50470')
448
self.assertEqual(config['hdfs-site']['dfs.support.broken.append'],
450
self.assertEqual(config['hdfs-site']['dfs.webhdfs.enabled'],
453
self.assertEqual(config['global']['hive_hostname'],
454
'hive_host.novalocal')
455
self.assertEqual(config['core-site']['hadoop.proxyuser.hive.hosts'],
456
'hive_host.novalocal')
457
self.assertEqual(config['hive-site']
458
['javax.jdo.option.ConnectionURL'],
459
'jdbc:mysql://hive_mysql_host.novalocal/hive?'
460
'createDatabaseIfNotExist=true')
461
self.assertEqual(config['hive-site']['hive.metastore.uris'],
462
'thrift://hive_ms_host.novalocal:9083')
464
'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in
465
config['webhcat-site']['templeton.hive.properties'])
466
self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.hosts'],
468
self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.groups'],
470
self.assertEqual(config['core-site']['hadoop.proxyuser.hue.hosts'],
472
self.assertEqual(config['core-site']['hadoop.proxyuser.hue.groups'],
474
self.assertEqual(config['webhcat-site']['templeton.zookeeper.hosts'],
475
'zk_host.novalocal:2181')
476
self.assertEqual(config['webhcat-site']['webhcat.proxyuser.hue.hosts'],
478
self.assertEqual(config['webhcat-site']
479
['webhcat.proxyuser.hue.groups'],
482
self.assertEqual(config['oozie-site']['oozie.base.url'],
483
'http://oozie_host.novalocal:11000/oozie')
484
self.assertEqual(config['oozie-site']
485
['oozie.service.ProxyUserService.proxyuser.hue.'
487
self.assertEqual(config['oozie-site']
488
['oozie.service.ProxyUserService.proxyuser.hue.'
490
self.assertEqual(config['global']['oozie_hostname'],
491
'oozie_host.novalocal')
492
self.assertEqual(config['core-site']['hadoop.proxyuser.oozie.hosts'],
493
'oozie_host.novalocal,222.11.9999,111.11.9999')
495
# test swift properties
496
self.assertEqual('swift_prop_value',
497
config['core-site']['swift.prop1'])
498
self.assertEqual('swift_prop_value2',
499
config['core-site']['swift.prop2'])
501
def test__determine_deployed_services(self, nova_mock):
502
cluster_config_file = pkg.resource_string(
503
version.version_info.package,
504
'plugins/hdp/versions/version_2_0_6/resources/'
505
'default-cluster.template')
507
master_host = base.TestServer(
508
'master.novalocal', 'master', '11111', 3,
509
'111.11.1111', '222.11.1111')
511
jt_host = base.TestServer(
512
'jt_host.novalocal', 'jt', '11111', 3,
513
'111.11.2222', '222.11.2222')
515
nn_host = base.TestServer(
516
'nn_host.novalocal', 'nn', '11111', 3,
517
'111.11.3333', '222.11.3333')
519
snn_host = base.TestServer(
520
'snn_host.novalocal', 'jt', '11111', 3,
521
'111.11.4444', '222.11.4444')
523
slave_host = base.TestServer(
524
'slave1.novalocal', 'slave', '11111', 3,
525
'222.22.6666', '333.22.6666')
527
master_ng = TestNodeGroup(
528
'master', [master_host],
530
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
531
'AMBARI_SERVER', 'AMBARI_AGENT', 'ZOOKEEPER_SERVER'])
532
jt_ng = TestNodeGroup('jt', [jt_host], ["RESOURCEMANAGER",
536
nn_ng = TestNodeGroup('nn', [nn_host], ["NAMENODE",
537
"GANGLIA_MONITOR", "AMBARI_AGENT"])
538
snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE",
539
"GANGLIA_MONITOR", "AMBARI_AGENT"])
540
slave_ng = TestNodeGroup(
541
'slave', [slave_host],
542
["DATANODE", "NODEMANAGER",
543
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE2_CLIENT",
546
cluster = base.TestCluster([master_ng, jt_ng, nn_ng,
548
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
549
cluster_config.create_operational_config(cluster, [])
550
services = cluster_config.services
551
for service in services:
552
if service.name in ['YARN', 'HDFS', 'MAPREDUCE2', 'GANGLIA',
553
'AMBARI', 'NAGIOS', 'ZOOKEEPER']:
554
self.assertTrue(service.deployed)
556
self.assertFalse(service.deployed)
558
def test_ambari_rpm_path(self, patched):
    """The default blueprint must point at the Ambari 1.6.0 repo URI."""
    template = pkg.resource_string(
        version.version_info.package,
        'plugins/hdp/versions/version_2_0_6/resources/'
        'default-cluster.template')
    spec = cs.ClusterSpec(template, version='2.0.6')

    expected = ('http://s3.amazonaws.com/'
                'public-repo-1.hortonworks.com/ambari/centos6/'
                '1.x/updates/1.6.0/ambari.repo')
    self.assertEqual(expected,
                     spec.configurations['ambari'].get('rpm', None))
571
def test_fs_umask(self, patched):
574
s2.CONF = TestCONF(False, False)
575
cluster_config_file = pkg.resource_string(
576
version.version_info.package,
577
'plugins/hdp/versions/version_2_0_6/resources/'
578
'default-cluster.template')
580
server1 = base.TestServer('host1', 'test-master', '11111', 3,
581
'111.11.1111', '222.11.1111')
582
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
583
'222.22.2222', '333.22.2222')
585
node_group1 = TestNodeGroup(
586
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
587
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
588
"GANGLIA_MONITOR", "NAGIOS_SERVER",
589
"AMBARI_SERVER", "AMBARI_AGENT",
590
"HISTORYSERVER", "ZOOKEEPER_SERVER"])
591
node_group2 = TestNodeGroup(
592
'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
595
cluster = base.TestCluster([node_group1, node_group2])
596
cluster_config = cs.ClusterSpec(cluster_config_file, '2.0.6')
597
cluster_config.create_operational_config(cluster, [])
601
cluster_config.configurations['hdfs-site']
602
['fs.permissions.umask-mode'])
606
def test_parse_default(self, patched):
    """Parsing the bundled default template yields the expected spec.

    Verifies services, configurations, and the shape of both node
    groups (cardinality, predicate, component membership).
    """
    template = pkg.resource_string(
        version.version_info.package,
        'plugins/hdp/versions/version_2_0_6/resources/'
        'default-cluster.template')

    spec = cs.ClusterSpec(template, version='2.0.6')

    self._assert_services(spec.services)
    self._assert_configurations(spec.configurations)

    groups = spec.node_groups
    self.assertEqual(2, len(groups))

    master = groups['master']
    self.assertEqual('master', master.name)
    self.assertIsNone(master.predicate)
    self.assertEqual('1', master.cardinality)
    self.assertEqual(8, len(master.components))
    for component in ('NAMENODE', 'RESOURCEMANAGER', 'HISTORYSERVER',
                      'SECONDARY_NAMENODE', 'GANGLIA_SERVER',
                      'NAGIOS_SERVER', 'AMBARI_SERVER',
                      'ZOOKEEPER_SERVER'):
        self.assertIn(component, master.components)

    slave = groups['slave']
    self.assertEqual('slave', slave.name)
    self.assertIsNone(slave.predicate)
    self.assertEqual('1+', slave.cardinality)
    self.assertEqual(5, len(slave.components))
    for component in ('DATANODE', 'NODEMANAGER', 'HDFS_CLIENT',
                      'YARN_CLIENT', 'MAPREDUCE2_CLIENT'):
        self.assertIn(component, slave.components)

    # Returned so other tests can reuse the parsed spec.
    return spec
646
def test_ambari_rpm(self, patched):
    """An 'rpm' URI must be present in the default ambari configuration."""
    cluster_config_file = pkg.resource_string(
        version.version_info.package,
        'plugins/hdp/versions/version_2_0_6/resources/'
        'default-cluster.template')

    cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')

    self._assert_configurations(cluster_config.configurations)
    ambari_config = cluster_config.configurations['ambari']
    # Bug fix: assertIsNotNone(obj, msg) takes the object under test
    # first; the original passed the message string first, so the
    # assertion was checking a literal and could never fail.
    self.assertIsNotNone(ambari_config.get('rpm', None),
                         'no rpm uri found')
659
def test_normalize(self, patched):
660
cluster_config_file = pkg.resource_string(
661
version.version_info.package,
662
'plugins/hdp/versions/version_2_0_6/resources/'
663
'default-cluster.template')
665
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
666
cluster = cluster_config.normalize()
668
configs = cluster.cluster_configs
669
contains_dfs_datanode_http_address = False
670
contains_staging_dir = False
671
contains_mapred_user = False
673
for entry in configs:
674
config = entry.config
675
# assert some random configurations across targets
676
if config.name == 'dfs.datanode.http.address':
677
contains_dfs_datanode_http_address = True
678
self.assertEqual('string', config.type)
679
self.assertEqual('0.0.0.0:50075', config.default_value)
680
self.assertEqual('HDFS', config.applicable_target)
682
if config.name == 'yarn.app.mapreduce.am.staging-dir':
683
contains_staging_dir = True
684
self.assertEqual('string', config.type)
687
config.default_value)
688
self.assertEqual('MAPREDUCE2',
689
config.applicable_target)
691
if config.name == 'mapred_user':
692
contains_mapred_user = True
693
self.assertEqual('string', config.type)
694
self.assertEqual('mapred', config.default_value)
695
self.assertEqual('MAPREDUCE2', config.applicable_target)
697
# print 'Config: name: {0}, type:{1},
698
# default value:{2}, target:{3}, Value:{4}'.format(
699
# config.name, config.type,
700
# config.default_value,
701
# config.applicable_target, entry.value)
703
self.assertTrue(contains_dfs_datanode_http_address)
704
self.assertTrue(contains_staging_dir)
705
self.assertTrue(contains_mapred_user)
706
node_groups = cluster.node_groups
707
self.assertEqual(2, len(node_groups))
708
contains_master_group = False
709
contains_slave_group = False
711
node_group = node_groups[i]
712
components = node_group.node_processes
713
if node_group.name == "master":
714
contains_master_group = True
715
self.assertEqual(8, len(components))
716
self.assertIn('NAMENODE', components)
717
self.assertIn('RESOURCEMANAGER', components)
718
self.assertIn('HISTORYSERVER', components)
719
self.assertIn('SECONDARY_NAMENODE', components)
720
self.assertIn('GANGLIA_SERVER', components)
721
self.assertIn('NAGIOS_SERVER', components)
722
self.assertIn('AMBARI_SERVER', components)
723
self.assertIn('ZOOKEEPER_SERVER', components)
724
# TODO(jspeidel): node configs
725
# TODO(jspeidel): vm_requirements
726
elif node_group.name == 'slave':
727
contains_slave_group = True
728
self.assertEqual(5, len(components))
729
self.assertIn('DATANODE', components)
730
self.assertIn('NODEMANAGER', components)
731
self.assertIn('HDFS_CLIENT', components)
732
self.assertIn('YARN_CLIENT', components)
733
self.assertIn('MAPREDUCE2_CLIENT', components)
734
# TODO(jspeidel): node configs
735
# TODO(jspeidel): vm requirements
737
self.fail('Unexpected node group: {0}'.format(node_group.name))
738
self.assertTrue(contains_master_group)
739
self.assertTrue(contains_slave_group)
741
def test_existing_config_item_in_top_level_within_blueprint(self, patched):
742
cluster_config_file = pkg.resource_string(
743
version.version_info.package,
744
'plugins/hdp/versions/version_2_0_6/resources/'
745
'default-cluster.template')
747
user_input_config = TestUserInputConfig(
748
'global', 'OOZIE', 'oozie_log_dir')
749
user_input = provisioning.UserInput(user_input_config,
752
server1 = base.TestServer('host1', 'test-master', '11111', 3,
753
'111.11.1111', '222.11.1111')
754
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
755
'222.22.2222', '333.22.2222')
757
node_group1 = TestNodeGroup(
758
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
759
"HISTORYSERVER", "SECONDARY_NAMENODE",
760
"GANGLIA_SERVER", "GANGLIA_MONITOR",
761
"NAGIOS_SERVER", "AMBARI_SERVER",
762
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
763
node_group2 = TestNodeGroup(
764
'slave', [server2], ["NODEMANAGER", "DATANODE",
765
"AMBARI_AGENT", "GANGLIA_MONITOR"])
767
cluster = base.TestCluster([node_group1, node_group2])
768
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
769
cluster_config.create_operational_config(cluster, [user_input])
770
self.assertEqual('/some/new/path', cluster_config.configurations
771
['global']['oozie_log_dir'])
773
def test_new_config_item_in_top_level_within_blueprint(self, patched):
774
cluster_config_file = pkg.resource_string(
775
version.version_info.package,
776
'plugins/hdp/versions/version_2_0_6/resources/'
777
'default-cluster.template')
779
user_input_config = TestUserInputConfig(
780
'global', 'general', 'new_property')
781
user_input = provisioning.UserInput(user_input_config, 'foo')
783
server1 = base.TestServer('host1', 'test-master', '11111', 3,
784
'111.11.1111', '222.11.1111')
785
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
786
'222.22.2222', '333.22.2222')
788
node_group1 = TestNodeGroup(
790
["NAMENODE", "RESOURCEMANAGER",
791
"HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
792
"GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
793
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
794
node_group2 = TestNodeGroup(
795
'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
798
cluster = base.TestCluster([node_group1, node_group2])
799
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
800
cluster_config.create_operational_config(cluster, [user_input])
802
'foo', cluster_config.configurations['global']['new_property'])
804
def test_topology_configuration_no_hypervisor(self, patched):
808
s2.CONF = TestCONF(True, False)
809
th.CONF = TestCONF(True, False)
810
cluster_config_file = pkg.resource_string(
811
version.version_info.package,
812
'plugins/hdp/versions/version_2_0_6/resources/'
813
'default-cluster.template')
815
server1 = base.TestServer('host1', 'test-master', '11111', 3,
816
'111.11.1111', '222.11.1111')
817
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
818
'222.22.2222', '333.22.2222')
820
node_group1 = TestNodeGroup(
821
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
822
"HISTORYSERVER", "SECONDARY_NAMENODE",
823
"GANGLIA_SERVER", "GANGLIA_MONITOR",
824
"NAGIOS_SERVER", "AMBARI_SERVER",
825
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
826
node_group2 = TestNodeGroup(
827
'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
830
cluster = base.TestCluster([node_group1, node_group2])
831
cluster_config = cs.ClusterSpec(cluster_config_file,
833
cluster_config.create_operational_config(cluster, [])
836
'org.apache.hadoop.net.NetworkTopology',
837
cluster_config.configurations['core-site']
838
['net.topology.impl'])
841
cluster_config.configurations['core-site']
842
['net.topology.nodegroup.aware'])
844
'org.apache.hadoop.hdfs.server.namenode.'
845
'BlockPlacementPolicyWithNodeGroup',
846
cluster_config.configurations['core-site']
847
['dfs.block.replicator.classname'])
850
cluster_config.configurations['core-site']
851
['fs.swift.service.sahara.location-aware'])
853
'org.apache.hadoop.net.ScriptBasedMapping',
854
cluster_config.configurations['core-site']
855
['net.topology.node.switch.mapping.impl'])
857
'/etc/hadoop/conf/topology.sh',
858
cluster_config.configurations['core-site']
859
['net.topology.script.file.name'])
864
cluster_config.configurations['mapred-site']
865
['mapred.jobtracker.nodegroup.aware'])
868
cluster_config.configurations['mapred-site']
869
['mapred.task.cache.levels'])
871
'org.apache.hadoop.mapred.JobSchedulableWithNodeGroup',
872
cluster_config.configurations['mapred-site']
873
['mapred.jobtracker.jobSchedulable'])
878
def test_topology_configuration_with_hypervisor(self, patched):
881
s2.CONF = TestCONF(True, True)
882
cluster_config_file = pkg.resource_string(
883
version.version_info.package,
884
'plugins/hdp/versions/version_2_0_6/resources/'
885
'default-cluster.template')
887
server1 = base.TestServer('host1', 'test-master', '11111', 3,
888
'111.11.1111', '222.11.1111')
889
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
890
'222.22.2222', '333.22.2222')
892
node_group1 = TestNodeGroup(
893
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
894
"HISTORYSERVER", "SECONDARY_NAMENODE",
895
"GANGLIA_SERVER", "GANGLIA_MONITOR",
896
"NAGIOS_SERVER", "AMBARI_SERVER",
897
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
898
node_group2 = TestNodeGroup(
899
'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
902
cluster = base.TestCluster([node_group1, node_group2])
903
cluster_config = cs.ClusterSpec(cluster_config_file,
905
cluster_config.create_operational_config(cluster, [])
908
'org.apache.hadoop.net.NetworkTopologyWithNodeGroup',
909
cluster_config.configurations['core-site']
910
['net.topology.impl'])
914
def test_update_ambari_admin_user(self, patched):
915
cluster_config_file = pkg.resource_string(
916
version.version_info.package,
917
'plugins/hdp/versions/version_2_0_6/resources/'
918
'default-cluster.template')
920
user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
922
user_input = provisioning.UserInput(user_input_config, 'new-user')
924
server1 = base.TestServer('host1', 'test-master', '11111', 3,
925
'111.11.1111', '222.11.1111')
926
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
927
'222.22.2222', '333.22.2222')
929
node_group1 = TestNodeGroup(
935
"SECONDARY_NAMENODE",
942
node_group2 = TestNodeGroup(
950
cluster = base.TestCluster([node_group1, node_group2])
951
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
952
cluster_config.create_operational_config(cluster, [user_input])
953
ambari_service = next(service for service in cluster_config.services
954
if service.name == 'AMBARI')
955
users = ambari_service.users
956
self.assertEqual(1, len(users))
957
self.assertEqual('new-user', users[0].name)
959
def test_update_ambari_admin_password(self, patched):
960
cluster_config_file = pkg.resource_string(
961
version.version_info.package,
962
'plugins/hdp/versions/version_2_0_6/resources/'
963
'default-cluster.template')
965
user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
966
'ambari.admin.password')
967
user_input = provisioning.UserInput(user_input_config, 'new-pwd')
969
server1 = base.TestServer('host1', 'test-master', '11111', 3,
970
'111.11.1111', '222.11.1111')
971
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
972
'222.22.2222', '333.22.2222')
974
node_group1 = TestNodeGroup(
980
"SECONDARY_NAMENODE",
987
node_group2 = TestNodeGroup(
995
cluster = base.TestCluster([node_group1, node_group2])
996
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
997
cluster_config.create_operational_config(cluster, [user_input])
998
ambari_service = next(service for service in cluster_config.services
999
if service.name == 'AMBARI')
1000
users = ambari_service.users
1001
self.assertEqual(1, len(users))
1002
self.assertEqual('new-pwd', users[0].password)
1004
def test_update_ambari_admin_user_and_password(self, patched):
1005
cluster_config_file = pkg.resource_string(
1006
version.version_info.package,
1007
'plugins/hdp/versions/version_2_0_6/resources/'
1008
'default-cluster.template')
1010
user_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
1011
'ambari.admin.user')
1012
pwd_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
1013
'ambari.admin.password')
1014
user_user_input = provisioning.UserInput(user_user_input_config,
1016
pwd_user_input = provisioning.UserInput(pwd_user_input_config,
1019
server1 = base.TestServer('host1', 'test-master', '11111', 3,
1020
'111.11.1111', '222.11.1111')
1021
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
1022
'222.22.2222', '333.22.2222')
1024
node_group1 = TestNodeGroup(
1025
'one', [server1], ["NAMENODE", "RESOURCEMANAGER",
1026
"HISTORYSERVER", "SECONDARY_NAMENODE",
1027
"GANGLIA_SERVER", "GANGLIA_MONITOR",
1028
"NAGIOS_SERVER", "AMBARI_SERVER",
1029
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
1030
node_group2 = TestNodeGroup(
1031
'two', [server2], ["NODEMANAGER", "DATANODE",
1032
"AMBARI_AGENT", "GANGLIA_MONITOR"])
1034
cluster = base.TestCluster([node_group1, node_group2])
1035
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
1036
cluster_config.create_operational_config(
1037
cluster, [user_user_input, pwd_user_input])
1038
ambari_service = next(service for service in cluster_config.services
1039
if service.name == 'AMBARI')
1040
users = ambari_service.users
1041
self.assertEqual(1, len(users))
1042
self.assertEqual('new-admin_user', users[0].name)
1043
self.assertEqual('new-admin_pwd', users[0].password)
1045
def test_validate_missing_hdfs(self, patched):
1046
server = base.TestServer('host1', 'slave', '11111', 3,
1047
'111.11.1111', '222.22.2222')
1048
server2 = base.TestServer('host2', 'master', '11112', 3,
1049
'111.11.1112', '222.22.2223')
1051
node_group = TestNodeGroup(
1052
'slave', [server], ["NODEMANAGER", "MAPREDUCE2_CLIENT",
1055
node_group2 = TestNodeGroup(
1056
'master', [server2], ["RESOURCEMANAGER", "ZOOKEEPER_SERVER"])
1058
cluster = base.TestCluster([node_group, node_group2])
1059
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1060
# should fail due to missing hdfs service
1062
cluster_config.create_operational_config(cluster, [])
1063
self.fail('Validation should have thrown an exception')
1064
except ex.RequiredServiceMissingException:
1068
def test_validate_missing_mr2(self, patched):
1069
server = base.TestServer('host1', 'slave', '11111', 3,
1070
'111.11.1111', '222.22.2222')
1071
server2 = base.TestServer('host2', 'master', '11112', 3,
1072
'111.11.1112', '222.22.2223')
1074
node_group = TestNodeGroup(
1075
'slave', [server], ["DATANODE"])
1077
node_group2 = TestNodeGroup(
1078
'master', [server2], ["NAMENODE", "ZOOKEEPER_SERVER"])
1080
cluster = base.TestCluster([node_group, node_group2])
1081
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1082
# should fail due to missing mr service
1084
cluster_config.create_operational_config(cluster, [])
1085
self.fail('Validation should have thrown an exception')
1086
except ex.RequiredServiceMissingException:
1090
def test_validate_missing_ambari(self, patched):
1091
server = base.TestServer('host1', 'slave', '11111', 3,
1092
'111.11.1111', '222.22.2222')
1093
server2 = base.TestServer('host2', 'master', '11112', 3,
1094
'111.11.1112', '222.22.2223')
1096
node_group = TestNodeGroup(
1097
'slave', [server], ["NAMENODE", "RESOURCEMANAGER",
1098
"ZOOKEEPER_SERVER"])
1100
node_group2 = TestNodeGroup(
1101
'master', [server2], ["DATANODE", "NODEMANAGER"])
1103
cluster = base.TestCluster([node_group, node_group2])
1104
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1105
# should fail due to missing ambari service
1107
cluster_config.create_operational_config(cluster, [])
1108
self.fail('Validation should have thrown an exception')
1109
except ex.RequiredServiceMissingException:
1113
    # TODO(jspeidel): move validate_* to test_services when validate
    # is called independently of clusterspec
1115
def test_validate_hdfs(self, patched):
1116
server = base.TestServer('host1', 'slave', '11111', 3,
1117
'111.11.1111', '222.22.2222')
1118
server2 = base.TestServer('host2', 'master', '11112', 3,
1119
'111.11.1112', '222.22.2223')
1121
node_group = TestNodeGroup(
1122
'slave', [server], ["DATANODE", "NODEMANAGER",
1123
"HDFS_CLIENT", "MAPREDUCE2_CLIENT"], 1)
1125
node_group2 = TestNodeGroup(
1126
'master', [server2], ["RESOURCEMANAGER", "AMBARI_SERVER",
1127
"ZOOKEEPER_SERVER"])
1129
cluster = base.TestCluster([node_group, node_group2])
1130
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1131
# should fail due to missing NN
1133
cluster_config.create_operational_config(cluster, [])
1134
self.fail('Validation should have thrown an exception')
1135
except ex.InvalidComponentCountException:
1139
node_group2 = TestNodeGroup(
1140
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1141
"HISTORYSERVER", "ZOOKEEPER_SERVER",
1143
cluster = base.TestCluster([node_group, node_group2])
1144
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1145
# should validate successfully now
1146
cluster_config.create_operational_config(cluster, [])
1148
# should cause validation exception due to 2 NN
1149
node_group3 = TestNodeGroup(
1150
'master2', [server2], ["NAMENODE"])
1151
cluster = base.TestCluster([node_group, node_group2, node_group3])
1152
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1154
cluster_config.create_operational_config(cluster, [])
1155
self.fail('Validation should have thrown an exception')
1156
except ex.InvalidComponentCountException:
1160
def test_validate_yarn(self, patched):
1161
server = base.TestServer('host1', 'slave', '11111', 3,
1162
'111.11.1111', '222.22.2222')
1163
server2 = base.TestServer('host2', 'master', '11112', 3,
1164
'111.11.1112', '222.22.2223')
1166
node_group = TestNodeGroup(
1167
'slave', [server], ["DATANODE", "NODEMANAGER",
1168
"HDFS_CLIENT", "MAPREDUCE2_CLIENT"])
1169
node_group2 = TestNodeGroup(
1170
'master', [server2], ["NAMENODE", "AMBARI_SERVER",
1171
"ZOOKEEPER_SERVER", "HISTORYSERVER"])
1173
cluster = base.TestCluster([node_group, node_group2])
1174
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1175
# should fail due to missing JT
1177
cluster_config.create_operational_config(cluster, [])
1178
self.fail('Validation should have thrown an exception')
1179
except ex.InvalidComponentCountException:
1182
node_group2 = TestNodeGroup(
1183
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1184
"AMBARI_SERVER", "ZOOKEEPER_SERVER",
1186
cluster = base.TestCluster([node_group, node_group2])
1187
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1188
# should validate successfully now
1189
cluster_config.create_operational_config(cluster, [])
1191
# should cause validation exception due to 2 JT
1192
node_group3 = TestNodeGroup(
1193
'master', [server2], ["RESOURCEMANAGER"])
1194
cluster = base.TestCluster([node_group, node_group2, node_group3])
1195
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1197
cluster_config.create_operational_config(cluster, [])
1198
self.fail('Validation should have thrown an exception')
1199
except ex.InvalidComponentCountException:
1203
# should cause validation exception due to 2 NN
1204
node_group3 = TestNodeGroup(
1205
'master', [server2], ["NAMENODE"])
1206
cluster = base.TestCluster([node_group, node_group2, node_group3])
1207
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1209
cluster_config.create_operational_config(cluster, [])
1210
self.fail('Validation should have thrown an exception')
1211
except ex.InvalidComponentCountException:
1215
# should fail due to no nodemanager
1216
node_group = TestNodeGroup(
1217
'slave', [server], ["DATANODE", "HDFS_CLIENT",
1218
"MAPREDUCE2_CLIENT"])
1219
cluster = base.TestCluster([node_group, node_group2])
1220
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1221
# should fail due to missing JT
1223
cluster_config.create_operational_config(cluster, [])
1224
self.fail('Validation should have thrown an exception')
1225
except ex.InvalidComponentCountException:
1229
def test_validate_hive(self, patched):
1230
server = base.TestServer('host1', 'slave', '11111', 3,
1231
'111.11.1111', '222.22.2222')
1232
server2 = base.TestServer('host2', 'master', '11112', 3,
1233
'111.11.1112', '222.22.2223')
1235
node_group = TestNodeGroup(
1236
'slave', [server], ["DATANODE", "NODEMANAGER",
1238
node_group2 = TestNodeGroup(
1239
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1240
"HISTORYSERVER", "AMBARI_SERVER",
1241
"ZOOKEEPER_SERVER"])
1243
cluster = base.TestCluster([node_group, node_group2])
1244
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1245
# should fail due to missing hive_server
1247
cluster_config.create_operational_config(cluster, [])
1248
self.fail('Validation should have thrown an exception')
1249
except ex.InvalidComponentCountException:
1252
node_group2 = TestNodeGroup(
1253
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1254
"HIVE_SERVER", "AMBARI_SERVER",
1255
"ZOOKEEPER_SERVER", "HISTORYSERVER"])
1256
cluster = base.TestCluster([node_group, node_group2])
1257
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1258
# should validate successfully now
1259
cluster_config.create_operational_config(cluster, [])
1261
# should cause validation exception due to 2 HIVE_SERVER
1262
node_group3 = TestNodeGroup(
1263
'master', [server2], ["HIVE_SERVER"])
1264
cluster = base.TestCluster([node_group, node_group2, node_group3])
1265
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1267
cluster_config.create_operational_config(cluster, [])
1268
self.fail('Validation should have thrown an exception')
1269
except ex.InvalidComponentCountException:
1273
def test_validate_zk(self, patched):
1274
server = base.TestServer('host1', 'slave', '11111', 3,
1275
'111.11.1111', '222.22.2222')
1276
server2 = base.TestServer('host2', 'master', '11112', 3,
1277
'111.11.1112', '222.22.2223')
1279
node_group = TestNodeGroup(
1280
'slave', [server], ["DATANODE", "NODEMANAGER",
1281
"ZOOKEEPER_CLIENT"])
1282
node_group2 = TestNodeGroup(
1283
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1284
"AMBARI_SERVER", "HISTORYSERVER"])
1286
cluster = base.TestCluster([node_group, node_group2])
1287
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1288
# should fail due to missing ZOOKEEPER_SERVER
1290
cluster_config.create_operational_config(cluster, [])
1291
self.fail('Validation should have thrown an exception')
1292
except ex.InvalidComponentCountException:
1295
node_group2 = TestNodeGroup(
1296
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1297
"HISTORYSERVER", "ZOOKEEPER_SERVER",
1299
cluster = base.TestCluster([node_group, node_group2])
1300
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1301
# should validate successfully now
1302
cluster_config.create_operational_config(cluster, [])
1304
# should cause validation exception due to 2 ZOOKEEPER_SERVER
1305
node_group3 = TestNodeGroup(
1306
'master', [server2], ["ZOOKEEPER_SERVER"])
1307
cluster = base.TestCluster([node_group, node_group2, node_group3])
1308
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1310
cluster_config.create_operational_config(cluster, [])
1311
self.fail('Validation should have thrown an exception')
1312
except ex.InvalidComponentCountException:
1316
def test_validate_oozie(self, patched):
1317
server = base.TestServer('host1', 'slave', '11111', 3,
1318
'111.11.1111', '222.22.2222')
1319
server2 = base.TestServer('host2', 'master', '11112', 3,
1320
'111.11.1112', '222.22.2223')
1322
node_group = TestNodeGroup(
1323
'slave', [server], ["DATANODE", "NODEMANAGER",
1325
node_group2 = TestNodeGroup(
1326
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1327
"HISTORYSERVER", "AMBARI_SERVER",
1328
"ZOOKEEPER_SERVER"])
1330
cluster = base.TestCluster([node_group, node_group2])
1331
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1332
# should fail due to missing OOZIE_SERVER
1334
cluster_config.create_operational_config(cluster, [])
1335
self.fail('Validation should have thrown an exception')
1336
except ex.InvalidComponentCountException:
1339
node_group2 = TestNodeGroup(
1340
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1341
"OOZIE_SERVER", "AMBARI_SERVER",
1342
"ZOOKEEPER_SERVER", "HISTORYSERVER"])
1343
cluster = base.TestCluster([node_group, node_group2])
1344
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1345
# should validate successfully now
1346
cluster_config.create_operational_config(cluster, [])
1348
# should cause validation exception due to 2 OOZIE_SERVER
1349
node_group3 = TestNodeGroup(
1350
'master', [server2], ["OOZIE_SERVER"])
1351
cluster = base.TestCluster([node_group, node_group2, node_group3])
1352
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1354
cluster_config.create_operational_config(cluster, [])
1355
self.fail('Validation should have thrown an exception')
1356
except ex.InvalidComponentCountException:
1360
def test_validate_ganglia(self, patched):
1361
server = base.TestServer('host1', 'slave', '11111', 3,
1362
'111.11.1111', '222.22.2222')
1363
server2 = base.TestServer('host2', 'master', '11112', 3,
1364
'111.11.1112', '222.22.2223')
1366
node_group = TestNodeGroup(
1367
'slave', [server], ["DATANODE", "NODEMANAGER",
1369
node_group2 = TestNodeGroup(
1370
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1371
"HISTORYSERVER", "AMBARI_SERVER",
1372
"ZOOKEEPER_SERVER"])
1374
cluster = base.TestCluster([node_group, node_group2])
1375
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1376
# should fail due to missing GANGLIA_SERVER
1378
cluster_config.create_operational_config(cluster, [])
1379
self.fail('Validation should have thrown an exception')
1380
except ex.InvalidComponentCountException:
1383
node_group2 = TestNodeGroup(
1384
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1385
"GANGLIA_SERVER", "AMBARI_SERVER",
1386
"HISTORYSERVER", "ZOOKEEPER_SERVER"])
1387
cluster = base.TestCluster([node_group, node_group2])
1388
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1389
# should validate successfully now
1390
cluster_config.create_operational_config(cluster, [])
1392
# should cause validation exception due to 2 GANGLIA_SERVER
1393
node_group3 = TestNodeGroup(
1394
'master2', [server2], ["GANGLIA_SERVER"])
1395
cluster = base.TestCluster([node_group, node_group2, node_group3])
1396
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1398
cluster_config.create_operational_config(cluster, [])
1399
self.fail('Validation should have thrown an exception')
1400
except ex.InvalidComponentCountException:
1404
def test_validate_ambari(self, patched):
1405
server = base.TestServer('host1', 'slave', '11111', 3,
1406
'111.11.1111', '222.22.2222')
1407
server2 = base.TestServer('host2', 'master', '11112', 3,
1408
'111.11.1112', '222.22.2223')
1410
node_group = TestNodeGroup(
1411
'slave', [server], ["DATANODE", "NODEMANAGER",
1413
node_group2 = TestNodeGroup(
1414
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1415
"HISTORYSERVER", "ZOOKEEPER_SERVER"])
1417
cluster = base.TestCluster([node_group, node_group2])
1418
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1419
# should fail due to missing AMBARI_SERVER
1421
cluster_config.create_operational_config(cluster, [])
1422
self.fail('Validation should have thrown an exception')
1423
except ex.InvalidComponentCountException:
1426
node_group2 = TestNodeGroup(
1427
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1428
"HISTORYSERVER", "AMBARI_SERVER",
1429
"ZOOKEEPER_SERVER"])
1430
cluster = base.TestCluster([node_group, node_group2])
1431
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1432
# should validate successfully now
1433
cluster_config.create_operational_config(cluster, [])
1435
# should cause validation exception due to 2 AMBARI_SERVER
1436
node_group2 = TestNodeGroup(
1437
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1438
"AMBARI_SERVER", "ZOOKEEPER_SERVER"])
1439
node_group3 = TestNodeGroup(
1440
'master', [server2], ["AMBARI_SERVER"])
1441
cluster = base.TestCluster([node_group, node_group2, node_group3])
1442
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1444
cluster_config.create_operational_config(cluster, [])
1445
self.fail('Validation should have thrown an exception')
1446
except ex.InvalidComponentCountException:
1450
def test_validate_hue(self, patched):
1451
server = base.TestServer('host1', 'slave', '11111', 3,
1452
'111.11.1111', '222.22.2222')
1453
server2 = base.TestServer('host2', 'master', '11112', 3,
1454
'111.11.1112', '222.22.2223')
1456
node_group = TestNodeGroup(
1457
'slave', [server], ["DATANODE", "NODEMANAGER",
1459
node_group2 = TestNodeGroup(
1460
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1461
"HISTORYSERVER", "AMBARI_SERVER",
1462
"ZOOKEEPER_SERVER"])
1464
cluster = base.TestCluster([node_group, node_group2])
1465
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1466
# should fail due to missing hive_server, oozie_server and
1467
# webhchat_server which is required by hue
1468
self.assertRaises(ex.RequiredServiceMissingException,
1469
cluster_config.create_operational_config,
1472
node_group2 = TestNodeGroup(
1473
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1474
"HIVE_SERVER", "AMBARI_SERVER",
1475
"ZOOKEEPER_SERVER", "HISTORYSERVER"])
1476
cluster = base.TestCluster([node_group, node_group2])
1477
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1478
# should fail due to missing oozie_server and webhchat_server, which
1479
# is required by hue
1480
self.assertRaises(ex.RequiredServiceMissingException,
1481
cluster_config.create_operational_config,
1484
node_group = TestNodeGroup(
1485
'slave', [server], ["DATANODE", "NODEMANAGER",
1486
"OOZIE_CLIENT", "HUE"])
1487
node_group2 = TestNodeGroup(
1488
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1489
"HIVE_SERVER", "AMBARI_SERVER",
1490
"ZOOKEEPER_SERVER", "HISTORYSERVER",
1492
cluster = base.TestCluster([node_group, node_group2])
1493
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1494
# should fail due to missing webhchat_server, which is required by hue
1495
self.assertRaises(ex.RequiredServiceMissingException,
1496
cluster_config.create_operational_config,
1499
node_group = TestNodeGroup(
1500
'slave', [server], ["DATANODE", "NODEMANAGER",
1501
"OOZIE_CLIENT", "HUE"])
1502
node_group2 = TestNodeGroup(
1503
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1504
"HIVE_SERVER", "AMBARI_SERVER",
1505
"ZOOKEEPER_SERVER", "HISTORYSERVER",
1506
"OOZIE_SERVER", "WEBHCAT_SERVER"])
1507
cluster = base.TestCluster([node_group, node_group2])
1508
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1509
# should validate successfully now
1510
cluster_config.create_operational_config(cluster, [])
1512
# should have automatically added a HIVE_CLIENT to "slave" node group
1513
hue_ngs = cluster_config.get_node_groups_containing_component("HUE")
1514
self.assertEqual(1, len(hue_ngs))
1515
self.assertIn("HIVE_CLIENT", hue_ngs.pop().components)
1517
# should cause validation exception due to 2 hue instances
1518
node_group3 = TestNodeGroup(
1519
'master', [server2], ["HUE"])
1520
cluster = base.TestCluster([node_group, node_group2, node_group3])
1521
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1522
self.assertRaises(ex.InvalidComponentCountException,
1523
cluster_config.create_operational_config,
1526
def test_validate_scaling_existing_ng(self, patched):
1527
server = base.TestServer('host1', 'slave', '11111', 3,
1528
'111.11.1111', '222.22.2222')
1529
server2 = base.TestServer('host2', 'master', '11112', 3,
1530
'111.11.1112', '222.22.2223')
1532
node_group = TestNodeGroup(
1533
'slave', [server], ["DATANODE", "NODEMANAGER"])
1534
node_group2 = TestNodeGroup(
1535
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1536
"HISTORYSERVER", "AMBARI_SERVER",
1537
"ZOOKEEPER_SERVER"])
1539
cluster = base.TestCluster([node_group, node_group2])
1540
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1541
# sanity check that original config validates
1542
cluster_config.create_operational_config(cluster, [])
1544
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1545
scaled_groups = {'master': 2}
1546
# should fail due to 2 JT
1548
cluster_config.create_operational_config(
1549
cluster, [], scaled_groups)
1550
self.fail('Validation should have thrown an exception')
1551
except ex.InvalidComponentCountException:
1555
def test_scale(self, patched):
1557
server = base.TestServer('host1', 'slave', '11111', 3,
1558
'111.11.1111', '222.22.2222')
1559
server2 = base.TestServer('host2', 'master', '11112', 3,
1560
'111.11.1112', '222.22.2223')
1562
node_group = TestNodeGroup(
1563
'slave', [server], ["DATANODE", "NODEMANAGER",
1565
node_group2 = TestNodeGroup(
1566
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1567
"HISTORYSERVER", "ZOOKEEPER_SERVER",
1570
cluster = base.TestCluster([node_group, node_group2])
1572
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1573
# sanity check that original config validates
1574
cluster_config.create_operational_config(cluster, [])
1576
slave_ng = cluster_config.node_groups['slave']
1577
self.assertEqual(1, slave_ng.count)
1579
cluster_config.scale({'slave': 2})
1581
self.assertEqual(2, slave_ng.count)
1583
def test_get_deployed_configurations(self, patched):
1585
server = base.TestServer('host1', 'slave', '11111', 3,
1586
'111.11.1111', '222.22.2222')
1587
server2 = base.TestServer('host2', 'master', '11112', 3,
1588
'111.11.1112', '222.22.2223')
1590
node_group = TestNodeGroup(
1591
'slave', [server], ["DATANODE", "NODEMANAGER"])
1592
node_group2 = TestNodeGroup(
1593
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1594
"AMBARI_SERVER", "ZOOKEEPER_SERVER",
1597
cluster = base.TestCluster([node_group, node_group2])
1599
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1600
# sanity check that original config validates
1601
cluster_config.create_operational_config(cluster, [])
1602
configs = cluster_config.get_deployed_configurations()
1603
expected_configs = set(['mapred-site', 'ambari', 'hdfs-site',
1604
'global', 'core-site', 'yarn-site'])
1605
self.assertEqual(expected_configs, expected_configs & configs)
1607
def test_get_deployed_node_group_count(self, patched):
1609
server = base.TestServer('host1', 'slave', '11111', 3,
1610
'111.11.1111', '222.22.2222')
1611
server2 = base.TestServer('host2', 'master', '11112', 3,
1612
'111.11.1112', '222.22.2223')
1614
slave_group = TestNodeGroup(
1615
'slave', [server], ["DATANODE", "NODEMANAGER"])
1616
slave2_group = TestNodeGroup(
1617
'slave2', [server], ["DATANODE", "NODEMANAGER"])
1618
master_group = TestNodeGroup(
1619
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1620
"HISTORYSERVER", "AMBARI_SERVER",
1621
"ZOOKEEPER_SERVER"])
1623
cluster = base.TestCluster([master_group, slave_group, slave2_group])
1624
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1625
cluster_config.create_operational_config(cluster, [])
1627
self.assertEqual(2, cluster_config.get_deployed_node_group_count(
1629
self.assertEqual(1, cluster_config.get_deployed_node_group_count(
1632
def test_get_node_groups_containing_component(self, patched):
1633
server = base.TestServer('host1', 'slave', '11111', 3,
1634
'111.11.1111', '222.22.2222')
1635
server2 = base.TestServer('host2', 'master', '11112', 3,
1636
'111.11.1112', '222.22.2223')
1638
slave_group = TestNodeGroup(
1639
'slave', [server], ["DATANODE", "NODEMANAGER"])
1640
slave2_group = TestNodeGroup(
1641
'slave2', [server], ["DATANODE", "NODEMANAGER"])
1642
master_group = TestNodeGroup(
1643
'master', [server2], ["NAMENODE", "RESOURCEMANAGER",
1644
"HISTORYSERVER", "AMBARI_SERVER",
1645
"ZOOKEEPER_SERVER"])
1647
cluster = base.TestCluster([master_group, slave_group, slave2_group])
1648
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1649
cluster_config.create_operational_config(cluster, [])
1651
datanode_ngs = cluster_config.get_node_groups_containing_component(
1653
self.assertEqual(2, len(datanode_ngs))
1654
ng_names = set([datanode_ngs[0].name, datanode_ngs[1].name])
1655
self.assertIn('slave', ng_names)
1656
self.assertIn('slave2', ng_names)
1658
def test_get_components_for_type(self, patched):
1660
cluster_config = base.create_clusterspec(hdp_version='2.0.6')
1661
clients = cluster_config.get_components_for_type('CLIENT')
1662
slaves = cluster_config.get_components_for_type('SLAVE')
1663
masters = cluster_config.get_components_for_type('MASTER')
1665
expected_clients = set(['HCAT', 'ZOOKEEPER_CLIENT',
1666
'MAPREDUCE2_CLIENT', 'HIVE_CLIENT',
1667
'HDFS_CLIENT', 'PIG', 'YARN_CLIENT', 'HUE'])
1668
self.assertEqual(expected_clients, expected_clients & set(clients))
1670
expected_slaves = set(['AMBARI_AGENT', 'NODEMANAGER', 'DATANODE',
1672
self.assertEqual(expected_slaves, expected_slaves & set(slaves))
1674
expected_masters = set(['SECONDARY_NAMENODE', 'HIVE_METASTORE',
1675
'AMBARI_SERVER', 'RESOURCEMANAGER',
1676
'WEBHCAT_SERVER', 'NAGIOS_SERVER',
1677
'MYSQL_SERVER', 'ZOOKEEPER_SERVER',
1678
'NAMENODE', 'HIVE_SERVER', 'GANGLIA_SERVER'])
1679
self.assertEqual(expected_masters, expected_masters & set(masters))
1681
def _assert_services(self, services):
1683
for service in services:
1685
found_services.append(name)
1686
self.service_validators[name](service)
1688
self.assertEqual(15, len(found_services))
1689
self.assertIn('HDFS', found_services)
1690
self.assertIn('MAPREDUCE2', found_services)
1691
self.assertIn('GANGLIA', found_services)
1692
self.assertIn('NAGIOS', found_services)
1693
self.assertIn('AMBARI', found_services)
1694
self.assertIn('PIG', found_services)
1695
self.assertIn('HIVE', found_services)
1696
self.assertIn('HCATALOG', found_services)
1697
self.assertIn('ZOOKEEPER', found_services)
1698
self.assertIn('WEBHCAT', found_services)
1699
self.assertIn('OOZIE', found_services)
1700
self.assertIn('SQOOP', found_services)
1701
self.assertIn('HBASE', found_services)
1702
self.assertIn('HUE', found_services)
1704
def _assert_hdfs(self, service):
1705
self.assertEqual('HDFS', service.name)
1707
found_components = {}
1708
for component in service.components:
1709
found_components[component.name] = component
1711
self.assertEqual(4, len(found_components))
1712
self._assert_component('NAMENODE', 'MASTER', "1",
1713
found_components['NAMENODE'])
1714
self._assert_component('DATANODE', 'SLAVE', "1+",
1715
found_components['DATANODE'])
1716
self._assert_component('SECONDARY_NAMENODE', 'MASTER', "1",
1717
found_components['SECONDARY_NAMENODE'])
1718
self._assert_component('HDFS_CLIENT', 'CLIENT', "1+",
1719
found_components['HDFS_CLIENT'])
1720
# TODO(jspeidel) config
1722
def _assert_mrv2(self, service):
1723
self.assertEqual('MAPREDUCE2', service.name)
1725
found_components = {}
1726
for component in service.components:
1727
found_components[component.name] = component
1729
self.assertEqual(2, len(found_components))
1730
self._assert_component('HISTORYSERVER', 'MASTER', "1",
1731
found_components['HISTORYSERVER'])
1732
self._assert_component('MAPREDUCE2_CLIENT', 'CLIENT', "1+",
1733
found_components['MAPREDUCE2_CLIENT'])
1735
def _assert_yarn(self, service):
1736
self.assertEqual('YARN', service.name)
1738
found_components = {}
1739
for component in service.components:
1740
found_components[component.name] = component
1742
self.assertEqual(3, len(found_components))
1743
self._assert_component('RESOURCEMANAGER', 'MASTER', "1",
1744
found_components['RESOURCEMANAGER'])
1745
self._assert_component('NODEMANAGER', 'SLAVE', "1+",
1746
found_components['NODEMANAGER'])
1747
self._assert_component('YARN_CLIENT', 'CLIENT', "1+",
1748
found_components['YARN_CLIENT'])
1750
def _assert_nagios(self, service):
1751
self.assertEqual('NAGIOS', service.name)
1753
found_components = {}
1754
for component in service.components:
1755
found_components[component.name] = component
1757
self.assertEqual(1, len(found_components))
1758
self._assert_component('NAGIOS_SERVER', 'MASTER', "1",
1759
found_components['NAGIOS_SERVER'])
1761
def _assert_ganglia(self, service):
1762
self.assertEqual('GANGLIA', service.name)
1764
found_components = {}
1765
for component in service.components:
1766
found_components[component.name] = component
1768
self.assertEqual(2, len(found_components))
1769
self._assert_component('GANGLIA_SERVER', 'MASTER', "1",
1770
found_components['GANGLIA_SERVER'])
1771
self._assert_component('GANGLIA_MONITOR', 'SLAVE', "1+",
1772
found_components['GANGLIA_MONITOR'])
1774
def _assert_ambari(self, service):
1775
self.assertEqual('AMBARI', service.name)
1777
found_components = {}
1778
for component in service.components:
1779
found_components[component.name] = component
1781
self.assertEqual(2, len(found_components))
1782
self._assert_component('AMBARI_SERVER', 'MASTER', "1",
1783
found_components['AMBARI_SERVER'])
1784
self._assert_component('AMBARI_AGENT', 'SLAVE', "1+",
1785
found_components['AMBARI_AGENT'])
1787
self.assertEqual(1, len(service.users))
1788
user = service.users[0]
1789
self.assertEqual('admin', user.name)
1790
self.assertEqual('admin', user.password)
1791
groups = user.groups
1792
self.assertEqual(1, len(groups))
1793
self.assertIn('admin', groups)
1795
def _assert_pig(self, service):
1796
self.assertEqual('PIG', service.name)
1797
self.assertEqual(1, len(service.components))
1798
self.assertEqual('PIG', service.components[0].name)
1800
def _assert_hive(self, service):
    """Check the HIVE service: server, metastore, MySQL and client."""
    self.assertEqual('HIVE', service.name)

    components = {c.name: c for c in service.components}

    self.assertEqual(4, len(components))
    for comp_name, comp_type, cardinality in (
            ('HIVE_SERVER', 'MASTER', "1"),
            ('HIVE_METASTORE', 'MASTER', "1"),
            ('MYSQL_SERVER', 'MASTER', "1"),
            ('HIVE_CLIENT', 'CLIENT', "1+")):
        self._assert_component(comp_name, comp_type, cardinality,
                               components[comp_name])
def _assert_hcatalog(self, service):
    """Check the HCATALOG service: a single component named HCAT."""
    components = service.components
    self.assertEqual('HCATALOG', service.name)
    self.assertEqual(1, len(components))
    self.assertEqual('HCAT', components[0].name)
def _assert_zookeeper(self, service):
    """Check the ZOOKEEPER service: a server master and client components."""
    self.assertEqual('ZOOKEEPER', service.name)

    components = {c.name: c for c in service.components}

    self.assertEqual(2, len(components))
    self._assert_component('ZOOKEEPER_SERVER', 'MASTER', "1",
                           components['ZOOKEEPER_SERVER'])
    self._assert_component('ZOOKEEPER_CLIENT', 'CLIENT', "1+",
                           components['ZOOKEEPER_CLIENT'])
def _assert_webhcat(self, service):
    """Check the WEBHCAT service: a single WEBHCAT_SERVER component."""
    components = service.components
    self.assertEqual('WEBHCAT', service.name)
    self.assertEqual(1, len(components))
    self.assertEqual('WEBHCAT_SERVER', components[0].name)
def _assert_oozie(self, service):
    """Check the OOZIE service: a server master and client components."""
    self.assertEqual('OOZIE', service.name)

    components = {c.name: c for c in service.components}

    self.assertEqual(2, len(components))
    for comp_name, comp_type, cardinality in (
            ('OOZIE_SERVER', 'MASTER', "1"),
            ('OOZIE_CLIENT', 'CLIENT', "1+")):
        self._assert_component(comp_name, comp_type, cardinality,
                               components[comp_name])
def _assert_sqoop(self, service):
    """Check the SQOOP service: a single component named SQOOP."""
    components = service.components
    self.assertEqual('SQOOP', service.name)
    self.assertEqual(1, len(components))
    self.assertEqual('SQOOP', components[0].name)
def _assert_hbase(self, service):
    """Check the HBASE service: master, region servers and client."""
    self.assertEqual('HBASE', service.name)

    components = {c.name: c for c in service.components}

    self.assertEqual(3, len(components))
    for comp_name, comp_type, cardinality in (
            ('HBASE_MASTER', 'MASTER', "1"),
            ('HBASE_REGIONSERVER', 'SLAVE', "1+"),
            ('HBASE_CLIENT', 'CLIENT', "1+")):
        self._assert_component(comp_name, comp_type, cardinality,
                               components[comp_name])
def _assert_hue(self, service):
    """Check the HUE service: exactly one HUE client component."""
    self.assertEqual('HUE', service.name)

    components = {c.name: c for c in service.components}

    self.assertEqual(1, len(components))
    self._assert_component('HUE', 'CLIENT', "1", components['HUE'])
def _assert_component(self, name, comp_type, cardinality, component):
    """Assert a component's name, type and cardinality all match."""
    # Checked in this order so a name mismatch is reported first.
    for expected, actual in ((name, component.name),
                             (comp_type, component.type),
                             (cardinality, component.cardinality)):
        self.assertEqual(expected, actual)
def _assert_configurations(self, configurations):
    """Verify all 16 expected HDP2 configuration sections are present."""
    expected_sections = (
        'global', 'core-site', 'yarn-site', 'mapred-site', 'hdfs-site',
        'ambari', 'webhcat-site', 'hive-site', 'oozie-site', 'hbase-site',
        'capacity-scheduler', 'hue-ini', 'hue-core-site', 'hue-hdfs-site',
        'hue-webhcat-site', 'hue-oozie-site')
    self.assertEqual(16, len(configurations))
    for section in expected_sections:
        self.assertIn(section, configurations)
class TestNodeGroup:
    """Minimal node-group stand-in used by the cluster-spec tests.

    NOTE(review): the original text of this class was garbled in the
    file (interleaved line numbers, dropped statements: the ``name``,
    ``count`` and ``id`` assignments and the ``storage_paths`` body are
    missing from the visible text). They are restored here from the
    constructor signature and the gaps in the fused numbering —
    confirm against version-control history.
    """

    def __init__(self, name, instances, node_processes, count=1):
        self.name = name
        self.instances = instances
        self.node_processes = node_processes
        self.count = count
        # presumably node groups are identified by name in these tests
        self.id = name

    def storage_paths(self):
        # Tests only need a single (empty) storage path.
        return ['']
1918
class TestUserInputConfig:
1919
def __init__(self, tag, target, name):
1921
self.applicable_target = target