This article focuses on the configuration file changes required for an HA setup.
For Hadoop installation and basic configuration, see: Hadoop Cluster Setup (Fully Distributed).
Server Preparation
VMware Workstation Pro 15.5
Three CentOS 6.5 64-bit virtual machines
Apache Hadoop 2.6.0
Apache ZooKeeper 3.5.7
Using three nodes as an example, roles are assigned as follows:
Node | Roles |
---|---|
node1 | NameNode DataNode JournalNode ZKFC ResourceManager NodeManager |
node2 | NameNode DataNode JournalNode ZKFC ResourceManager NodeManager |
node3 | DataNode JournalNode NodeManager |
Configure the ZooKeeper Cluster
# Deploy ZooKeeper on all three nodes
mv conf/zoo_sample.cfg conf/zoo.cfg
vim conf/zoo.cfg
# Modify the following line
dataDir=/opt/module/zookeeper/zkData
# Append the following at the end of the file
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
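Each node also needs a myid file under dataDir whose content matches its server.N id in zoo.cfg; ZooKeeper will not start without it. A minimal sketch, assuming the dataDir configured above:
# On node1 (write 2 on node2 and 3 on node3)
mkdir -p /opt/module/zookeeper/zkData
echo 1 > /opt/module/zookeeper/zkData/myid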
Configuration Files
core-site.xml
<configuration>
<!-- Group the addresses of the two NameNodes into a single nameservice, mycluster -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<!-- Storage directory for files Hadoop generates at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/ha/hadoop-2.6.0/data/tmp</value>
</property>
<!-- Address of the ZooKeeper quorum -->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>
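With fs.defaultFS pointing at the nameservice, clients address the cluster by its logical name instead of a specific NameNode host. For example, once the cluster is running:
bin/hdfs dfs -ls hdfs://mycluster/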
hdfs-site.xml
<configuration>
<!-- Logical name of the nameservice -->
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<!-- NameNodes that belong to the nameservice -->
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>node1:9000</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>node2:9000</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>node1:50070</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>node2:50070</value>
</property>
<!-- Location on the JournalNodes where NameNode edit logs are stored -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/mycluster</value>
</property>
<!-- Fencing method, ensuring only one NameNode serves clients at any time -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- sshfence requires passphraseless SSH login -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/linux123/.ssh/id_rsa</value>
</property>
<!-- Local storage directory for the JournalNodes -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/ha/hadoop-2.6.0/data/jn</value>
</property>
<!-- Disable permission checking -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<!-- Proxy provider the HDFS client uses to determine which NameNode is Active -->
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Enable automatic failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>
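sshfence works by logging into the other NameNode host over SSH to kill the stale process, so the two NameNode hosts need passphraseless SSH to each other for the user running HDFS. A minimal sketch, assuming the linux123 user from the key path above:
# Run on both node1 and node2 as linux123
ssh-keygen -t rsa
ssh-copy-id linux123@node1
ssh-copy-id linux123@node2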
yarn-site.xml
<configuration>
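<!-- Auxiliary service the NodeManagers run so MapReduce can shuffle -->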
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Declare the two ResourceManagers -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>cluster-yarn1</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>node1</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>node2</value>
</property>
<!-- Address of the ZooKeeper quorum -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Enable automatic recovery of ResourceManager state -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper cluster -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
</configuration>
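Once both ResourceManagers are up, you can verify which one holds the Active role (rm1 and rm2 are the ids configured above):
bin/yarn rmadmin -getServiceState rm1
bin/yarn rmadmin -getServiceState rm2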
mapred-site.xml
<configuration>
<!-- Run MapReduce on YARN; the default is local (simulates distributed computation in a single process) -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
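Distribute the modified configuration files to all three nodes before starting anything. A minimal sketch, assuming the installation path used above and that the files were edited on node1:
# Run from /opt/ha/hadoop-2.6.0 on node1
scp etc/hadoop/core-site.xml etc/hadoop/hdfs-site.xml etc/hadoop/yarn-site.xml etc/hadoop/mapred-site.xml node2:/opt/ha/hadoop-2.6.0/etc/hadoop/
scp etc/hadoop/core-site.xml etc/hadoop/hdfs-site.xml etc/hadoop/yarn-site.xml etc/hadoop/mapred-site.xml node3:/opt/ha/hadoop-2.6.0/etc/hadoop/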
Start the Cluster
# HDFS-HA
# Start the ZooKeeper service on all three nodes
bin/zkServer.sh start
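# Optional: check each node's role (expect one leader and two followers)
bin/zkServer.sh status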
# Start a JournalNode on each of the three nodes
sbin/hadoop-daemons.sh start journalnode
# Format the NameNode on node1 (the JournalNodes must already be running)
bin/hdfs namenode -format
# Start the NameNode on node1
sbin/hadoop-daemon.sh start namenode
# On node2, sync the NameNode metadata from node1 to bootstrap the standby
bin/hdfs namenode -bootstrapStandby
# Start the NameNode on node2
sbin/hadoop-daemon.sh start namenode
# At this point both NameNodes are in Standby state
# Format the ZKFC state in ZooKeeper from node1
bin/hdfs zkfc -formatZK
# Start a ZKFC on each NameNode host; the first one started becomes Active
sbin/hadoop-daemon.sh start zkfc
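# Optional: confirm the HA znode was created (run from the ZooKeeper directory)
# bin/zkCli.sh -server node1:2181
#   ls /hadoop-ha          # should list: [mycluster]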
# Start the DataNodes on all nodes
sbin/hadoop-daemons.sh start datanode
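# Optional: query the HA state of each NameNode
bin/hdfs haadmin -getServiceState nn1   # expect: active
bin/hdfs haadmin -getServiceState nn2   # expect: standby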
# YARN-HA
# Use the cluster start script (run on node1)
sbin/start-yarn.sh
# The ResourceManager on node2 must be started manually
sbin/yarn-daemon.sh start resourcemanager
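Finally, a quick failover test (a sketch; find the actual process id with jps): kill the Active NameNode and confirm the standby takes over.
# On node1: find and kill the Active NameNode
jps                                     # note the NameNode pid
kill -9 <NameNode-pid>
# The ZKFC should promote nn2 within seconds
bin/hdfs haadmin -getServiceState nn2   # expect: active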