Script Shell pour démarrer rapidement le cluster Hadoop HA
1. Modifiez allzkServer.sh
vi allzkServer.sh
#!/bin/bash
# allzkServer.sh — start or stop the ZooKeeper server on every slave node
# over SSH.
#
# Usage: allzkServer.sh {start|stop}
#
# /etc/profile is sourced on the remote side because non-interactive SSH
# sessions do not load it, and zkServer.sh needs JAVA_HOME from it.

ZK_BIN="/usr/Software/ZooKeeper/zookeeper-3.4.13/bin"

case "$1" in
  start|stop)
    # Both actions differ only by the verb passed to zkServer.sh,
    # so a single arm handles them.
    for host in slave1 slave2 slave3; do
      echo "*********$host zkServer $1*********"
      ssh "$host" "source /etc/profile;$ZK_BIN/zkServer.sh $1"
    done
    ;;
  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 2
    ;;
esac
Placez allzkServer.sh dans le répertoire ZOOKEEPER_HOME/bin du nœud slave1
2. Modifiez alljournal.sh
vi alljournal.sh
#!/bin/bash
# alljournal.sh — start or stop the HDFS JournalNode daemon on slave1
# (where this script runs, so locally) and on slave2/slave3 over SSH.
#
# Usage: alljournal.sh {start|stop}
#
# /etc/profile is sourced remotely because non-interactive SSH sessions
# do not load it, and hadoop-daemon.sh needs the env vars defined there.

HADOOP_SBIN="/usr/Software/Hadoop/hadoop-2.7.3/sbin"

case "$1" in
  start|stop)
    # slave1 hosts this script, so its journalnode is managed locally.
    echo "*********slave1 journalnode $1*********"
    "$HADOOP_SBIN/hadoop-daemon.sh" "$1" journalnode
    for host in slave2 slave3; do
      echo "*********$host journalnode $1*********"
      ssh "$host" "source /etc/profile;$HADOOP_SBIN/hadoop-daemon.sh $1 journalnode"
    done
    ;;
  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 2
    ;;
esac
Placez alljournal.sh dans le répertoire HADOOP_HOME/bin du nœud slave1
3. Modifiez allhdfs.sh
vi allhdfs.sh
#!/bin/bash
# allhdfs.sh — orchestrate the whole Hadoop HA cluster from master1.
#
# start order: ZooKeeper ensemble -> JournalNodes -> master1 NameNode/ZKFC
#              -> YARN -> DataNodes -> master2 NameNode (bootstrap + start),
#              ZKFC, standby ResourceManager -> MapReduce JobHistory server.
# stop order:  the same components, shut down in reverse.
#
# Usage: allhdfs.sh {start|stop}

HADOOP_HOME="/usr/Software/Hadoop/hadoop-2.7.3"
ZK_BIN="/usr/Software/ZooKeeper/zookeeper-3.4.13/bin"

case "$1" in
  start)
    echo "*********slave zkServer start*********"
    ssh slave1 "$ZK_BIN/allzkServer.sh start"
    echo "*********slave journalnode start*********"
    ssh slave1 "$HADOOP_HOME/bin/alljournal.sh start"
    echo "*********master1 namenode start*********"
    "$HADOOP_HOME/sbin/hadoop-daemon.sh" start namenode
    echo "*********master1 zkfc start*********"
    "$HADOOP_HOME/sbin/hadoop-daemon.sh" start zkfc
    echo "*********master1 yarn start*********"
    "$HADOOP_HOME/sbin/start-yarn.sh"
    echo "*********slave datanode start*********"
    # hadoop-daemons.sh (plural) fans out to every host in the slaves file.
    "$HADOOP_HOME/sbin/hadoop-daemons.sh" start datanode
    # NOTE(review): -bootstrapStandby is a one-time metadata copy for initial
    # standby setup; re-running it on every start will prompt (or fail) once
    # the standby's name dirs already exist. Consider moving this step to a
    # separate installation script.
    echo "*********master2 namenode-bootstrapStandby start*********"
    ssh master2 "$HADOOP_HOME/bin/hdfs namenode -bootstrapStandby"
    echo "*********master2 namenode start*********"
    ssh master2 "$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode"
    echo "*********master2 zkfc start*********"
    ssh master2 "$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc"
    echo "*********master2 resourcemanager start*********"
    ssh master2 "$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager"
    echo "*********master1 MRhistory start*********"
    "$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh" start historyserver
    ;;
  stop)
    echo "*********master1 MRhistory stop*********"
    "$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh" stop historyserver
    echo "*********master2 resourcemanager stop*********"
    ssh master2 "$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager"
    echo "*********master2 zkfc stop*********"
    ssh master2 "$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc"
    echo "*********master2 namenode stop*********"
    ssh master2 "$HADOOP_HOME/sbin/hadoop-daemon.sh stop namenode"
    echo "*********slave datanode stop*********"
    "$HADOOP_HOME/sbin/hadoop-daemons.sh" stop datanode
    echo "*********master1 yarn stop*********"
    "$HADOOP_HOME/sbin/stop-yarn.sh"
    echo "*********master1 zkfc stop*********"
    "$HADOOP_HOME/sbin/hadoop-daemon.sh" stop zkfc
    echo "*********master1 namenode stop*********"
    "$HADOOP_HOME/sbin/hadoop-daemon.sh" stop namenode
    echo "*********slave journalnode stop*********"
    ssh slave1 "$HADOOP_HOME/bin/alljournal.sh stop"
    echo "*********slave zkServer stop*********"
    ssh slave1 "$ZK_BIN/allzkServer.sh stop"
    ;;
  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 2
    ;;
esac
Placez allhdfs.sh dans le répertoire HADOOP_HOME/bin du nœud master1
4. Démarrez le cluster
Les scripts ci-dessus sont placés dans le répertoire bin principalement parce que celui-ci figure déjà dans les variables d'environnement (PATH), ce qui permet de les lancer sans taper le chemin complet.
// À exécuter depuis master1
allhdfs.sh start // démarre tous les processus, y compris le serveur d'historique MapReduce
allhdfs.sh stop // arrête le cluster