Enabling HA for HDFS and YARN on a Hadoop Cluster

Enabling NameNode HA

core-site.xml configuration

<configuration>
<!-- ========== HA: logical nameservice and ZooKeeper quorum ========== -->
        <property> 
                <name>fs.defaultFS</name>
                <value>hdfs://ns</value>
        </property>
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>manager:2181,slaver1:2181,slaver2:2181</value>
        </property>
<!-- ========== end HA settings ========== -->
        <property>
                <name>io.file.buffer.size</name>
                <value>131072</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/usr/temp</value>
        </property>
        <property>
                <name>hadoop.proxyuser.root.hosts</name>
                <value>*</value>
        </property>
        <property>
                <name>hadoop.proxyuser.root.groups</name>
                <value>*</value>
        </property>
</configuration>
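
Automatic failover depends on the ZooKeeper ensemble listed in ha.zookeeper.quorum, so it is worth confirming that the quorum is healthy before going further. A minimal check, assuming zkServer.sh is on the PATH of each host (and, on ZooKeeper 3.5+, that ruok is whitelisted via 4lw.commands.whitelist):

	# Run on each of manager, slaver1 and slaver2; one node should
	# report "leader" and the other two "follower".
	zkServer.sh status

	# Or probe each server remotely; a healthy server answers "imok".
	echo ruok | nc manager 2181
	echo ruok | nc slaver1 2181
	echo ruok | nc slaver2 2181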

hdfs-site.xml configuration

<configuration>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/usr/dfs/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/usr/dfs/data</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>3</value>
        </property>
        <property>
                <name>dfs.webhdfs.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>dfs.permissions</name>
                <value>false</value>
        </property>
        <property>
                <name>dfs.web.ugi</name>
                <value>supergroup</value>
        </property>
<!-- ========== HA: nameservice, NameNode pair, JournalNodes, failover ========== -->
        <property>
                <name>dfs.nameservices</name>
                <value>ns</value>
        </property>
        <property>
                <name>dfs.ha.namenodes.ns</name>
                <value>nn1,nn2</value>
        </property>
        <property>
                <name>dfs.namenode.rpc-address.ns.nn1</name>
                <value>manager:9000</value>
        </property>
        <property>
                <name>dfs.namenode.http-address.ns.nn1</name>
                <value>manager:9870</value>
        </property>
        <property>
                <name>dfs.namenode.rpc-address.ns.nn2</name>
                <value>slaver1:9000</value>
        </property>
        <property>
                <name>dfs.namenode.http-address.ns.nn2</name>
                <value>slaver1:9870</value>
        </property>
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://manager:8485;slaver1:8485;slaver2:8485/ns</value>
        </property>
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>dfs.client.failover.proxy.provider.ns</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>sshfence(root:2022)</value>
        </property>

        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/root/.ssh/id_rsa</value>
        </property>
<!-- ========== end HA settings ========== -->
</configuration>
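
With both NameNodes running (see the startup sequence below), the HA state can be verified against the nn1/nn2 IDs defined above. Note that sshfence here logs in as root on port 2022 with the listed private key, so passwordless SSH between the two NameNode hosts must already be set up; a quick check of both, run from manager:

	hdfs haadmin -getServiceState nn1	# expected: active
	hdfs haadmin -getServiceState nn2	# expected: standby

	# Fencing will fail at failover time if this login does not
	# work non-interactively:
	ssh -p 2022 -i /root/.ssh/id_rsa root@slaver1 hostname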

Enabling ResourceManager HA

yarn-site.xml configuration

<configuration>

<!-- Site specific YARN configuration properties -->
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
                <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>
        <!-- ========== HA: ResourceManager pair and ZooKeeper state store ========== -->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>ljk</value>
        </property>
        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>manager</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>slaver1</value>
        </property>
        <property>
                <name>yarn.resourcemanager.recovery.enabled</name>
                <value>true</value>
        </property>
        <property>
              <name>yarn.nodemanager.local-dirs</name>
              <value>/usr/yarn/local</value>
        </property>
        <property>
              <name>yarn.resourcemanager.store.class</name>
              <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
        </property>
        <property>
              <name>yarn.resourcemanager.zk-address</name>
              <value>manager:2181,slaver1:2181,slaver2:2181</value>
        </property>
 <!-- ========== end HA settings ========== -->
</configuration>
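
Likewise, once both ResourceManagers are up, their HA state can be queried with the rm1/rm2 IDs configured above:

	yarn rmadmin -getServiceState rm1	# one should report "active"
	yarn rmadmin -getServiceState rm2	# and the other "standby"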

Startup sequence

1. Start ZooKeeper.
2. Start the JournalNode cluster:
	hadoop-daemons.sh start journalnode
3. Format ZKFC, which creates the HA znode in ZooKeeper:
	hdfs zkfc -formatZK
4. Format the NameNode:
	On the node that will become active, run: hdfs namenode -format
	When converting an existing non-HA cluster to HA, run instead: bin/hdfs namenode -initializeSharedEdits
5. Start the NameNodes:
	On the active node: hadoop-daemon.sh start namenode
	On the standby node, bootstrap the metadata and then start the daemon: hdfs namenode -bootstrapStandby, followed by hadoop-daemon.sh start namenode
6. Start the DataNodes.
7. Start ZKFC on each NameNode host:
	hadoop-daemon.sh start zkfc
8. Start YARN:
	On the active node: sbin/start-yarn.sh
	On the standby node: yarn-daemon.sh start resourcemanager
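
For a first-time bring-up, the whole sequence can be collected into one script run from manager. This is only a sketch, assuming ZooKeeper is already running on all three nodes, passwordless SSH from manager to slaver1 works, and the Hadoop bin/sbin directories are on the PATH; on Hadoop 3.x, hdfs --daemon start ... is the preferred replacement for the deprecated hadoop-daemon.sh scripts, though both still work:

	#!/bin/bash
	# One-time HA bring-up, run on manager (the intended active node).
	hadoop-daemons.sh start journalnode		# step 2
	hdfs zkfc -formatZK				# step 3
	hdfs namenode -format				# step 4 (first format only)
	hadoop-daemon.sh start namenode			# step 5, active NameNode
	ssh slaver1 'hdfs namenode -bootstrapStandby && hadoop-daemon.sh start namenode'
	hadoop-daemons.sh start datanode		# step 6
	hadoop-daemon.sh start zkfc			# step 7, on both NameNode hosts
	ssh slaver1 'hadoop-daemon.sh start zkfc'
	start-yarn.sh					# step 8
	ssh slaver1 'yarn-daemon.sh start resourcemanager'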

Reposted from blog.csdn.net/qq_29989725/article/details/107986998