hadoop2简明配置

core-site.xml

<configuration>
    <!-- Default filesystem URI: all relative HDFS paths resolve against the
         NameNode on host "cf", port 8020. -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://cf:8020/</value>
    </property>
</configuration>

hdfs-site.xml

<configuration>
    <!-- Group whose members act as HDFS superusers. -->
    <property>
        <name>dfs.permissions.superusergroup</name>
        <value>hadoop</value>
    </property>

    <!-- NameNode metadata (fsimage + edit log) directory. -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/hadoop/works/1/dfs/nn</value>
    </property>

    <!-- Secondary NameNode checkpoint storage.
         NOTE(review): dfs dirs here use /home/hadoop/works/... while
         yarn-site uses /home/hadoop/work/... — confirm that difference
         is intentional. -->
    <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file:/home/hadoop/works/1/dfs/snn</value>
    </property>

    <!-- checkpoint -->
    <!-- Secondary NameNode HTTP endpoint (runs on host "index"). -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>index:50090</value>
    </property>
    <!-- Checkpoint every 180 s, and poll for uncheckpointed transactions
         every 180 s. -->
    <property>
        <name>dfs.namenode.checkpoint.period</name>
        <value>180</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.check.period</name>
        <value>180</value>
    </property>

    <!-- DataNode block storage, comma-separated (one entry per disk). -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/hadoop/works/1/dfs/dn,file:/home/hadoop/works/2/dfs/dn</value>
    </property>

    <!-- Default block replication factor. -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>

    <!-- Keep a DataNode alive after losing up to 1 of its data volumes. -->
    <property>
        <name>dfs.datanode.failed.volumes.tolerated</name>
        <value>1</value>
    </property>

    <!-- Static user the HDFS web UI browses the filesystem as.
         (Original author's note, translated: "not mentioned in the official
         docs; the logs complained the ugi [web user] was rejected;
         *** did not work either".)
         FIX: this property takes a SINGLE user name (default "dr.who");
         the original comma-separated value "hadoop,hadoop" is invalid. -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>hadoop</value>
    </property>
</configuration>

mapred-site.xml

<configuration>
    <!-- Run MapReduce jobs on YARN (MRv2) rather than the classic
         JobTracker/TaskTracker runtime. -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>

    <!-- Heap ceiling for map/reduce child JVMs.
         NOTE(review): mapred.child.java.opts is the legacy (MRv1) key; it is
         still honored by Hadoop 2, but mapreduce.map.java.opts /
         mapreduce.reduce.java.opts supersede it — confirm which the cluster
         should standardize on. -->
    <property>
        <name>mapred.child.java.opts</name>
        <value>-Xmx400m</value>
    </property>
</configuration>

yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->

    <!-- ResourceManager endpoints, all on host "cf". -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>cf:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>cf:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>cf:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>cf:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>cf:8088</value>
    </property>

    <!-- Total memory (MB) a NodeManager may allocate to containers. -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>6144</value>
    </property>

    <!-- MapReduce shuffle auxiliary service.
         NOTE(review): "mapreduce.shuffle" matches Hadoop 2.0.x / CDH4 (the
         distro used here); Hadoop 2.2+ renamed it "mapreduce_shuffle"
         because dots are no longer allowed in service names — confirm
         against the deployed version before reuse. -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce.shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>

    <!-- NodeManager scratch and container-log dirs, comma-separated
         (one per disk).
         NOTE(review): these use /home/hadoop/work/... while hdfs-site uses
         /home/hadoop/works/... — confirm the difference is intentional. -->
    <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>/home/hadoop/work/1/yarn/local,/home/hadoop/work/2/yarn/local</value>
    </property>
    <property>
        <name>yarn.nodemanager.log-dirs</name>
        <value>/home/hadoop/work/1/yarn/logs,/home/hadoop/work/2/yarn/logs</value>
    </property>
</configuration>

slaves 文件中每行指定一台 DataNode 的机器名（主机名），启动脚本据此在各从节点上启动 DataNode。

系统环境变量

# JDK installation root used by the Hadoop launch scripts.
export JAVA_HOME=/usr/local/jdk

# Root of the unpacked Hadoop distribution (CDH4 / Hadoop 2.0.0);
# all the *_HOME variables below point at this single tree.
export HADOOP_PREFIX=~/hadoop-home/hadoop-2.0.0-cdh4.0.0

export HADOOP_MAPRED_HOME=$HADOOP_PREFIX

export HADOOP_COMMON_HOME=$HADOOP_PREFIX

export HADOOP_HDFS_HOME=$HADOOP_PREFIX

export YARN_HOME=$HADOOP_PREFIX

# Expose the hadoop/hdfs/yarn CLIs (bin) and the start/stop scripts (sbin).
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin

# NOTE(review): this exports an EMPTY log directory — the value was most
# likely lost when the post was copied. Confirm and fill in the intended
# path, or remove the line so the scripts use their default log location.
export HADOOP_LOG_DIR=

猜你喜欢

转载自yaven.iteye.com/blog/1684526