Shell scripts to start, stop, restart, and check processes on an HBase and ZooKeeper based HA cluster

Copyright notice: this is an original post by the author and may not be reproduced without permission. https://blog.csdn.net/qq_32297447/article/details/79607506
This builds on my earlier write-up: Hadoop HA high-availability environment setup based on HBase && ZK.
Save each of the four scripts below in its own file, place them all in the same directory, and then use the following commands to start, stop, restart, and inspect the Hadoop HA cluster:
sh hadoop-ha-cluster.sh start       # start the cluster
sh hadoop-ha-cluster.sh stop        # stop the cluster
sh hadoop-ha-cluster.sh restart     # restart the cluster
sh hadoop-ha-cluster.sh status      # show the processes on each node
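All four scripts drive the other nodes over ssh, so they assume passwordless SSH from the node where you run them to every host in the cluster. A minimal one-time setup sketch, assuming the hadoop user and the hostnames used later in this post (adjust to your own environment):
# run once on the control node as the hadoop user (assumed user and hostnames)
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa      # generate a key pair (skip if you already have one)
for host in master master26 dn23 dn24 dn25
do
        ssh-copy-id hadoop@$host              # push the public key to each node
done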

ZooKeeper cluster start/stop management script
#!/bin/bash
# FileName:zk-manage.sh
# Description: ZooKeeper cluster start/stop management script
# Author:david
SLAVES=$(cat /home/hadoop/app/hadoop/etc/hadoop/slaves)
#echo $SLAVES
start_time=`date +%s`
for slave in $SLAVES
do
        case $1 in
                start)    ssh -t $slave "zkServer.sh start" 1>/dev/null;;
                stop)     ssh -t $slave "zkServer.sh stop" 1>/dev/null;;
                status)   echo && ssh -t $slave "zkServer.sh status";;
                restart)  ssh -t $slave "zkServer.sh restart" 1>/dev/null;;
                *)        echo -e "Usage: sh zk-manage.sh {start|stop|status|restart} ^_^\n" && exit;;
        esac
done
end_time=`date +%s`
elapse_time=$((${end_time}-${start_time}))
echo -e "\n$1 ZooKeeper Server takes ${elapse_time} seconds\n"
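The script takes its node list from Hadoop's slaves file, so it assumes ZooKeeper is installed on exactly those hosts and that zkServer.sh is on the PATH of a non-interactive ssh session (for example exported in each node's ~/.bashrc). A hypothetical slaves file for this cluster might look like:
# /home/hadoop/app/hadoop/etc/hadoop/slaves -- assumed contents, one hostname per line
dn23
dn24
dn25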
Hadoop start/stop management script; HDFS, YARN, and the ResourceManager on node2 need to be started separately
#!/bin/bash
# FileName:hadoop-manage.sh
# Description: Hadoop start/stop management script; HDFS, YARN, and the ResourceManager on node2 need to be started separately
# Author:david
NameNode1=master
NameNode2=master26
start_time=`date +%s`

case $1 in
        start)
                ssh -t $NameNode1 "start-dfs.sh"
                ssh -t $NameNode1 "start-yarn.sh"
                ssh -t $NameNode2 "yarn-daemon.sh start resourcemanager"
        ;;
        stop)
                ssh -t $NameNode1 "stop-dfs.sh"
                ssh -t $NameNode1 "stop-yarn.sh"
                ssh -t $NameNode2 "yarn-daemon.sh stop resourcemanager"
        ;;
        *)
                echo -e "Usage: hadoop-manage.sh {start|stop} ^_^\n" && exit
        ;;
esac
end_time=`date +%s`
elapse_time=$((${end_time}-${start_time}))
echo -e "\n$1 Hadoop Server takes ${elapse_time} seconds\n"
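Once Hadoop is up, the HA state of the two NameNodes and the two ResourceManagers can be checked from any node. The service IDs nn1/nn2 and rm1/rm2 below are assumptions; use the IDs defined in your hdfs-site.xml and yarn-site.xml:
# which NameNode is active and which is standby (nn1/nn2 are assumed HA IDs)
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
# same check for the two ResourceManagers (rm1/rm2 are assumed HA IDs)
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2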
HBase cluster start/stop management script

#!/bin/bash
# FileName:hbase-manage.sh
# Description: HBase cluster start/stop management script
# Author:david
# primary HMaster
main_hmaster=master
# backup HMaster
bak_hmaster=master26
start_time=`date +%s`

case $1 in
    # start the primary HMaster first, then the backup HMaster
    start)
ssh -t $main_hmaster << mhm1
start-hbase.sh
mhm1
ssh -t $bak_hmaster << bhm1
hbase-daemon.sh start master
bhm1
    ;;

    # stop the backup HMaster first, then the primary HMaster
    stop)
ssh -t $bak_hmaster << bhm2
hbase-daemon.sh stop master
bhm2
ssh -t $main_hmaster << mhm2
stop-hbase.sh
mhm2
    ;;
    *)  echo -e "Usage: sh hbase-manage.sh {start|stop} ^_^\n" && exit;;
esac

end_time=`date +%s`
elapse_time=$((${end_time}-${start_time}))
echo -e "\n$1 HBase Server takes ${elapse_time} seconds\n"
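To confirm that both HMasters came up, a quick check is to ask HBase for its status from the shell (assuming hbase is on the PATH); on HBase 1.x and later the summary line also counts the active and backup masters:
# prints a one-line cluster summary (servers alive/dead, average load)
echo "status" | hbase shell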
Hadoop HA cluster start/stop script
#!/bin/bash
# FileName:hadoop-ha-cluster.sh
# Description: Hadoop HA cluster start/stop script
# Author:david
#CLUSTER_CONF_PATH=$(cd "$(dirname "$0")"; pwd)
NameNode1=master
NameNode2=master26
DataNode1=dn23
DataNode2=dn24
DataNode3=dn25
start_time=`date +%s`

# helper that prints the Java processes running on every node
function showJps(){
# processes on namenode1 (node1)
echo -e "\n**********************************************************************************"
ssh -t $NameNode1 << n1
echo "Processes currently running on $NameNode1:"
jps
exit
n1

# processes on namenode2 (node2)
echo -e "\n**********************************************************************************"
ssh -t $NameNode2 << n2
echo "Processes currently running on $NameNode2:"
jps
exit
n2

# processes on datanode1 (node3)
echo -e "\n**********************************************************************************"
ssh -t $DataNode1 << d1
echo "Processes currently running on $DataNode1:"
jps
exit
d1

# processes on datanode2 (node4)
echo -e "\n**********************************************************************************"
ssh -t $DataNode2 << d2
echo "Processes currently running on $DataNode2:"
jps
exit
d2

# processes on datanode3 (node5)
echo -e "\n**********************************************************************************"
ssh -t $DataNode3 << d3
echo "Processes currently running on $DataNode3:"
jps
exit
d3
}

case $1 in
        # start ZooKeeper first, then Hadoop, then HBase
        start)
                sh zk-manage.sh start
                sh hadoop-manage.sh start
                sh hbase-manage.sh start
        ;;
        # stop HBase first, then Hadoop, then ZooKeeper
        stop)
                sh hbase-manage.sh stop
                sh hadoop-manage.sh stop
                sh zk-manage.sh stop
        ;;
        # restart: stop the whole cluster, then start it again
        restart)
                sh hadoop-ha-cluster.sh stop
                sh hadoop-ha-cluster.sh start
        ;;
        # show the processes on each node
        status)
                showJps
        ;;
        *) echo -e "Usage: sh hadoop-ha-cluster.sh {start|stop|restart|status} ^_^\n"  ;;

esac
echo -e 
end_time=`date +%s`
elapse_time=$((${end_time}-${start_time}))
echo -e "\n$1 Hadoop HA Cluster Server takes ${elapse_time} seconds\n"
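The showJps function above repeats the same ssh block for every node; if you prefer, it can be collapsed into a loop over the node list. A minimal sketch under the same assumptions (passwordless ssh, jps on each node's PATH):
# alternative showJps: iterate over the node list instead of repeating the ssh block
function showJps(){
        for node in $NameNode1 $NameNode2 $DataNode1 $DataNode2 $DataNode3
        do
                echo -e "\n**********************************************************************************"
                echo "Processes currently running on $node:"
                ssh -t $node "jps"
        done
}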
After a successful start, running sh hadoop-ha-cluster.sh status shows output like the following:
***********************************************************
Processes currently running on node1:
2804 DFSZKFailoverController
2503 NameNode
4263 Jps
2923 ResourceManager
3182 HMaster

***********************************************************
Processes currently running on node2:
2587 ResourceManager
2460 DFSZKFailoverController
2860 HMaster
4220 Jps
2381 NameNode

***********************************************************
Processes currently running on node3:
2388 QuorumPeerMain
2516 JournalNode
2622 NodeManager
3118 Jps
2447 DataNode

***********************************************************
Processes currently running on node4:
2354 QuorumPeerMain
2419 DataNode
2599 NodeManager
2488 JournalNode
3356 Jps
2799 HRegionServer

***********************************************************
Processes currently running on node5:
2784 HRegionServer
2582 NodeManager
2410 DataNode
3389 Jps
2351 QuorumPeerMain
2479 JournalNode
