~sward-h/charms/precise/hadoop/configure-dirs


Viewing changes to hooks/hdfs-common

  • Committer: James Page
  • Date: 2012-02-27 12:40:14 UTC
  • Revision ID: james.page@canonical.com-20120227124014-56f631h6swnyj1dg
Updates for secondary namenode

=== modified file 'hooks/hdfs-common'
@@ -49,19 +49,22 @@
 open_ports () {
     case $1 in
         namenode)
-            open-port 8020 # DFS Nameserver
-            open-port 50070 # DFS Web UI
+            open-port 8020
+            open-port 50070
             ;;
         datanode)
-            open-port 50075 # Web interface for datanode
+            open-port 50010
+            open-port 50020
+            open-port 50075
+            ;;
+        secondarynamenode)
+            open-port 50090
             ;;
     esac
 }
 
 configure_hadoop () {
-    hbase=`config-get hbase`
-    # Copy distribution configuration and then
-    # specialize
+    # Copy distribution configuration and then specialize
     if [ ! -d /etc/hadoop/conf.juju ]
     then
         cp -r /etc/hadoop/conf.empty /etc/hadoop/conf.juju
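
For reference, the ports opened above are the stock Hadoop 1.x defaults: 8020 is the NameNode RPC endpoint, 50070 the NameNode web UI, 50010/50020/50075 the DataNode data-transfer, IPC and HTTP ports, and 50090 the SecondaryNameNode HTTP interface. A quick, hypothetical smoke test from outside the unit (the hostname is a placeholder, not part of the charm) might be:

# Hypothetical check - confirm the SecondaryNameNode HTTP port answers
# once open_ports has run and the service has been exposed.
nc -z secondary-0.example.com 50090 && echo "50090 reachable"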
@@ -75,37 +78,38 @@
     # Configure HDFS
     dir=`dotdee --dir /etc/hadoop/conf.juju/hdfs-site.xml`
     config_basic $dir
-    config_element "dfs.name.dir" "/var/lib/hadoop/cache/hadoop/dfs/name" > \
+    # Purge existing configuration
+    rm -f $dir/1*-dfs.*
+    config_element "dfs.name.dir" \
+        "/var/lib/hadoop/cache/hadoop/dfs/name" > \
         $dir/10-dfs.name.dir
-    config_element "dfs.namenode.handler.count" "`config-get dfs.namenode.handler.count`" > \
+    config_element "dfs.namenode.handler.count" \
+        "`config-get dfs.namenode.handler.count`" > \
         $dir/11-dfs.namenode.handler.count
-    config_element "dfs.block.size" "`config-get dfs.block.size`" > \
+    config_element "dfs.block.size" \
+        "`config-get dfs.block.size`" > \
         $dir/12-dfs.block.size
-    if [ "$hbase" = "True" ]
-    then
-        # Turn on append support for HBase
+    config_element "dfs.datanode.max.xcievers" \
+        "`config-get dfs.datanode.max.xcievers`" > \
+        $dir/13-dfs.datanode.max.xcievers
+    [ "`config-get hbase`" = "True" ] && \
         config_element "dfs.support.append" "true" > \
-            $dir/15-dfs.support.append
-        config_element "dfs.datanode.max.xcievers" "4096" > \
-            $dir/16-dfs.datanode.max.xcievers
-    else
-        rm -f $dir/15-dfs.support.append $dir/16-dfs.datanode.max.xcievers
-    fi
-    if [ "`config-get webhdfs`" = "True" ]
-    then
+            $dir/14-dfs.support.append || :
+    [ "`config-get webhdfs`" = "True" ] && \
         config_element "dfs.webhdfs.enabled" "true" > \
-            $dir/20-dfs.webhdfs.enabled
-    else
-        rm -f $dir/20-dfs.webhdfs.enabled
-    fi
+            $dir/15-dfs.webhdfs.enabled || :
+    # TODO - secure this hadoop installation!
+    config_element "dfs.permissions" "false" > \
+            $dir/16-dfs.permissions
     dotdee --update /etc/hadoop/conf.juju/hdfs-site.xml || true
     # Configure Hadoop Core
     dir=`dotdee --dir /etc/hadoop/conf.juju/core-site.xml`
     config_basic $dir
+    rm -f $dir/1*-*
     config_element "hadoop.tmp.dir" "/var/lib/hadoop/cache/\${user.name}" > \
-        $dir/11-hadoop.tmp.dir
+        $dir/10-hadoop.tmp.dir
     config_element "io.file.buffer.size" "`config-get io.file.buffer.size`" > \
-        $dir/12-io.file.buffer.size
+        $dir/11-io.file.buffer.size
     dotdee --update /etc/hadoop/conf.juju/core-site.xml || true
 }
 
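config_basic and config_element are helpers defined elsewhere in hdfs-common and do not appear in this diff. Assuming config_element simply emits one Hadoop <property> stanza per name/value pair, a minimal stand-in would be the sketch below; dotdee then concatenates the numbered fragments (10-*, 11-*, ...) in lexical order into hdfs-site.xml on --update, which is why the new rm -f purge of stale fragments matters when the numbering changes between revisions.

# Hypothetical stand-in for config_element (the real helper lives
# elsewhere in the charm): print one Hadoop <property> stanza so that
# dotdee can assemble the numbered fragments into a *-site.xml file.
config_element () {
    cat <<EOF
  <property>
    <name>$1</name>
    <value>$2</value>
  </property>
EOF
}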
@@ -113,28 +117,37 @@
     dir=`dotdee --dir /etc/hadoop/conf.juju/core-site.xml`
     juju-log "Configuring service unit as $1..."
     case $1 in
-        datanode)
+        datanode|secondarynamenode)
             namenode_address=`relation-get private-address`
             config_element "fs.default.name" "hdfs://$namenode_address:8020" > \
-                $dir/10-fs.default.name
+                $dir/50-fs.default.name
             ;;
         namenode)
             private_address=`unit-get private-address`
             config_element "fs.default.name" "hdfs://$private_address:8020" > \
-                $dir/10-fs.default.name
+                $dir/50-fs.default.name
             ;;
     esac
     dotdee --update /etc/hadoop/conf.juju/core-site.xml || true
+    dir=`dotdee --dir /etc/hadoop/conf.juju/hdfs-site.xml`
+    case $1 in
+        secondarynamenode)
+            namenode_address=`relation-get private-address`
+            config_element "dfs.http.address" "$namenode_address:50070" > \
+                $dir/50-dfs.http.address
+            ;;
+    esac
+    dotdee --update /etc/hadoop/conf.juju/hdfs-site.xml || true
 }
 
 install_packages () {
-    juju-log "Installing extra packages for $1"
     case $1 in
-        namenode)
-            apt-get -y install hadoop-namenode
+        namenode|datanode|secondarynamenode)
+            juju-log "Installing extra packages for $1"
+            apt-get -y install hadoop-$1
             ;;
-        datanode)
-            apt-get -y install hadoop-datanode
+        *)
+            juju-log "Unsupported role $1..."
             ;;
     esac
 }
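
In the new secondarynamenode case above, relation-get private-address returns the namenode's address (the hook runs in the context of the relation to the namenode), so dfs.http.address points the secondary at the primary's web UI port, which it uses to fetch checkpoints. Assuming the <property> format sketched earlier and a made-up address of 10.0.0.5, the fragment written on a secondarynamenode unit would look roughly like:

# Hypothetical contents of the new fragment on a secondarynamenode unit
# (the address is made up for illustration):
$ cat "`dotdee --dir /etc/hadoop/conf.juju/hdfs-site.xml`/50-dfs.http.address"
  <property>
    <name>dfs.http.address</name>
    <value>10.0.0.5:50070</value>
  </property>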
@@ -165,8 +178,12 @@
 # Determines what type of node this is
 resolve_role () {
     role="unconfigured"
-    [ -d /usr/share/doc/hadoop-namenode ] && role="namenode" || :
-    [ -d /usr/share/doc/hadoop-datanode ] && role="datanode" || :
+    [ -d /usr/share/doc/hadoop-namenode ] && \
+        role="namenode" || :
+    [ -d /usr/share/doc/hadoop-secondarynamenode ] && \
+        role="secondarynamenode" || :
+    [ -d /usr/share/doc/hadoop-datanode ] && \
+        role="datanode" || :
     echo "$role"
 }
 role=`resolve_role`
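
resolve_role keys off the documentation directories installed by the hadoop-namenode, hadoop-secondarynamenode and hadoop-datanode packages; since each test simply overwrites $role, the last matching check (datanode) wins if more than one package is somehow present. A quick manual way to see which role packages a unit actually has installed:

# Hypothetical manual check on a unit - list whichever role packages
# install_packages has pulled in (lines starting with ii are installed).
dpkg -l hadoop-namenode hadoop-secondarynamenode hadoop-datanode 2>/dev/null | grep '^ii'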
@@ -198,10 +215,35 @@
             namenode)
                 juju-log "Already configured as namenode"
                 # Unit should only ever assume this role once so no
-                # further action is required
-                ;;
-            *)
-                juju-log "Already configured as another personality"
+                # further action is required - this prevents adding
+                # an additional unit to the namenode master breaking
+                # things.
+                ;;
+            *)
+                juju-log "Already configured as another role: $role"
+                exit 1
+                ;;
+        esac
+        ;;
+    secondarynamenode-relation-joined)
+        case $role in
+            unconfigured)
+                juju-log "Configuring this unit as a secondarynamenode"
+                role="secondarynamenode"
+                install_packages $role
+                configure_role $role
+                restart_hadoop $role
+                open_ports $role
+                ;;
+            secondarynamenode)
+                juju-log "Already configured as secondarynamenode"
+                # Unit should only ever assume this role once so no
+                # further action is required - this prevents adding
+                # an additional unit to the namenode master breaking
+                # things.
+                ;;
+            *)
+                juju-log "Already configured as another role: $role"
                 exit 1
                 ;;
         esac
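
The new secondarynamenode-relation-joined branch mirrors the existing namenode and datanode handling: an unconfigured unit installs hadoop-secondarynamenode, writes its configuration, restarts the daemons and opens port 50090, while a unit that already holds another role exits with an error. In practice the branch would be driven by relating a second service to the namenode; the sketch below is a guess at the deployment steps, with service names as placeholders and endpoint names inferred from the *-relation-joined hook names in this diff:

# Hypothetical deployment sketch (pyjuju-era syntax; service names are
# placeholders, endpoint names inferred from the hook names above).
juju deploy --repository=. local:hadoop hadoop-master
juju deploy --repository=. local:hadoop hadoop-secondary
juju add-relation hadoop-master:namenode hadoop-secondary:secondarynamenode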
@@ -219,10 +261,12 @@
             datanode)
                 juju-log "Already configured as datanode"
                 # Unit should only ever assume this role once so no
-                # further action is required
+                # further action is required - this prevents adding
+                # an additional unit to the namenode master breaking
+                # things.
                 ;;
             *)
-                juju-log "Already configured as another personality"
+                juju-log "Already configured as another role: $role"
                 exit 1
                 ;;
         esac