Hadoop 2.0 cluster install




Master
hduser@master:~$ jps
8500 Jps
7476 DataNode
8150 NodeManager
7355 NameNode
8030 ResourceManager
hduser@master:~$

Slave
hduser@slave:~$ jps
7378 NodeManager
6628 DataNode
7510 Jps
6749 SecondaryNameNode
hduser@slave:~$

URL:

Name Node: http://192.168.56.101:50070/dfshealth.html#tab-overview
YARN Services: http://192.168.56.101:8088/cluster
Secondary Name Node: http://192.168.56.102:50090/status.html
Data Node 1: http://192.168.56.101:50075/
Data Node 2: http://192.168.56.102:50075/


-----------------------------------CONFIG--------------------------------

hduser@master:/usr/local/hadoop/etc/hadoop$ cat masters
slave
hduser@master:/usr/local/hadoop/etc/hadoop$ cat slaves
#localhost
master
slave
hduser@master:/usr/local/hadoop/etc/hadoop$ cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

 <property>
  <name>hadoop.tmp.dir</name>
  <value>/app/hadoop/tmp</value>
  <description>A base for other temporary directories.</description>
 </property>

 <property>
  <name>fs.default.name</name>
  <value>hdfs://master:54310</value>
  <description>The name of the default file system.  A URI whose
  scheme and authority determine the FileSystem implementation.  The
  uri's scheme determines the config property (fs.SCHEME.impl) naming
  the FileSystem implementation class.  The uri's authority is used to
  determine the host, port, etc. for a filesystem.</description>
 </property>

 <!-- change the default checkpoint settings for the secondary namenode -->
 <property>
   <name>fs.checkpoint.period</name>
   <value>3600</value>
   <description>The number of seconds between two periodic checkpoints.
   </description>
 </property>
 <property>
   <name>fs.checkpoint.size</name>
   <value>67108864</value>
 </property>

</configuration>
hduser@master:/usr/local/hadoop/etc/hadoop$ cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!-- specify the server where the secondary namenode runs -->
<property>
  <name>dfs.http.address</name>
  <value>master:50070</value>
  <description>
   The address and the base port where the dfs namenode web ui will listen on.
   If the port is 0 then the server will start on a free port.
  </description>
</property>
<property>
 <name>dfs.namenode.secondary.http-address</name>
 <value>slave:50090</value>
</property>


 <property>
  <name>dfs.replication</name>
  <value>1</value>
  <description>Default block replication.
  The actual number of replications can be specified when the file is created.
  The default is used if replication is not specified in create time.
  </description>
 </property>
 <property>
   <name>dfs.namenode.name.dir</name>
   <value>file:/usr/local/hadoop_store/hdfs/namenode</value>
 </property>
 <property>
   <name>dfs.datanode.data.dir</name>
   <value>file:/usr/local/hadoop_store/hdfs/datanode</value>
 </property>

</configuration>
hduser@master:/usr/local/hadoop/etc/hadoop$ cat yarn-site.xml
<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!--    yarn cluster       -->
<property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
</property>



<!-- if not using YARN -->
<!--  nothing   -->

<!-- Site specific YARN configuration properties -->

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>


</configuration>
hduser@master:/usr/local/hadoop/etc/hadoop$ cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--
  <property>
  <name>mapred.job.tracker</name>
  <value>localhost:54311</value>
  <description>The host and port that the MapReduce job tracker runs
  at.  If "local", then jobs are run in-process as a single map
  and reduce task.
  </description>
 </property>
-->

<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

</configuration>
hduser@master:/usr/local/hadoop/etc/hadoop$

reference

1.install java
https://my.oschina.net/mxs/blog/518826

wget http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64.tar.gz --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie"

2.install hadoop
http://www.bogotobogo.com/Hadoop/BigData_hadoop_Install_on_ubuntu_single_node_cluster.php
https://my.oschina.net/itblog/blog/282694

#http://backtobazics.com/big-data/setup-multi-node-hadoop-2-6-0-cluster-with-yarn/

猜你喜欢

转载自15609845237.iteye.com/blog/2350883
今日推荐