MapReduce WordCount

Add the following dependencies to the <dependencies> section of pom.xml:

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.6.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.6.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-core</artifactId>
      <version>2.6.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
      <version>2.6.0</version>
    </dependency>
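
The 2.6.0 version above should match the Hadoop version running on the cluster; if in doubt, it can be checked on any cluster node with:

    hadoop version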

1. Running as a jar

package Hadoop;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCount {

    //Four type parameters: the first two are the input <key,value> pair, the last two the output <key,value> pair
    //LongWritable, IntWritable and Text are Hadoop's writable counterparts of Java's long, int and String
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>{
        //A constant holding the count 1 emitted for every word; it could be omitted and the literal 1 used instead (private final static makes it a constant)
        private final static IntWritable one = new IntWritable(1);
        //Reusable buffer that holds the current word
        private Text word = new Text();
        //map method: key is the byte offset of the line, value is the line's text; context is used to emit output
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            //Split the line into whitespace-separated tokens
            StringTokenizer itr = new StringTokenizer(value.toString());
            //Process each token in turn
            while (itr.hasMoreTokens()) {
                //nextToken() returns the substring up to the next delimiter
                word.set(itr.nextToken());
                //e.g. context.write("hello", 1)
                context.write(word, one);
            }
        }
    }

    //Again, the first two type parameters are the input <key,value> pair and the last two the output pair
    public static class IntSumReducer extends Reducer<Text,IntWritable,Text,IntWritable> {
        //Reusable writable holding the summed count
        private IntWritable result = new IntWritable();
        //reduce method: key is a word such as "hello"; values iterates over all counts emitted for that key
        public void reduce(Text key, Iterable<IntWritable> values,Context context) throws IOException, InterruptedException {
            //Accumulator for this key's total count
            int sum = 0;
            //Add up all the counts for the key
            for (IntWritable val : values) {
                sum += val.get();
            }
            //Store the total in result
            result.set(sum);
            //e.g. context.write("hello", 7)
            context.write(key, result);
        }
    }

    //Driver: configures and submits the job
    public static void main(String[] args) throws Exception {
        //Job configuration
        Configuration conf = new Configuration();
        //Parse the command-line arguments, i.e. the HDFS input and output paths
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        //Create the job and give it a name
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        //Mapper class
        job.setMapperClass(TokenizerMapper.class);
        //Combiner class; the reducer can be reused here because summing partial counts is associative
        job.setCombinerClass(IntSumReducer.class);
        //Reducer class
        job.setReducerClass(IntSumReducer.class);
        //Output key type
        job.setOutputKeyClass(Text.class);
        //Output value type
        job.setOutputValueClass(IntWritable.class);
        //Input paths: every argument except the last
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        //Output path: the last argument
        FileOutputFormat.setOutputPath(job,new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}

Build the jar (for example with mvn package), copy it to /usr/local/hadoop, and run the following command:

hadoop jar /usr/local/hadoop/MavenMapReduceHelloWorld-1.0-SNAPSHOT.jar Hadoop.WordCount /input /output
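
Before submitting, the /input directory must already exist on HDFS and contain the text files to count; afterwards the counts can be read from the reducer's part file. A minimal sketch (words.txt is only a placeholder file name):

    # upload some input text
    hdfs dfs -mkdir -p /input
    hdfs dfs -put words.txt /input
    # after the job finishes, each output line is a word and its count, separated by a tab
    hdfs dfs -cat /output/part-r-00000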

Result

The word counts are written under /output (see the hdfs dfs -cat command above).

2. Remote execution from IDEA

package Hadoop;
import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount2 {

    //Four type parameters: the first two are the input <key,value> pair, the last two the output <key,value> pair
    //LongWritable, IntWritable and Text are Hadoop's writable counterparts of Java's long, int and String
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>{
        //A constant holding the count 1 emitted for every word; it could be omitted and the literal 1 used instead (private final static makes it a constant)
        private final static IntWritable one = new IntWritable(1);
        //Reusable buffer that holds the current word
        private Text word = new Text();
        //map method: key is the byte offset of the line, value is the line's text; context is used to emit output
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            //Print each input record so the map phase can be followed from the IDE console
            System.out.println("Map key:" + key + ",value:" + value);
            //Split the line into whitespace-separated tokens
            StringTokenizer itr = new StringTokenizer(value.toString());
            //Process each token in turn
            while (itr.hasMoreTokens()) {
                //nextToken() returns the substring up to the next delimiter
                word.set(itr.nextToken());
                //e.g. context.write("hello", 1)
                context.write(word, one);
            }
        }
    }

    //Again, the first two type parameters are the input <key,value> pair and the last two the output pair
    public static class IntSumReducer extends Reducer<Text,IntWritable,Text,IntWritable> {
        //Reusable writable holding the summed count
        private IntWritable result = new IntWritable();
        //reduce method: key is a word such as "hello"; values iterates over all counts emitted for that key
        public void reduce(Text key, Iterable<IntWritable> values,Context context) throws IOException, InterruptedException {
            //Collect the incoming values into a string so they can be printed for debugging
            StringBuffer sb = new StringBuffer();
            sb.append("Reduce key:" + key + ",value:");
            //Accumulator for this key's total count
            int sum = 0;
            //Add up all the counts for the key
            for (IntWritable val : values) {
                sb.append(val.get()+" ");
                sum += val.get();
            }
            System.out.println(sb.toString());
            //Store the total in result
            result.set(sum);
            //e.g. context.write("hello", 7)
            context.write(key, result);
        }
    }

    //Driver: configures and submits the job
    public static void main(String[] args) throws Exception {
        //Job configuration
        Configuration conf = new Configuration();

        //Point the default filesystem at the remote HDFS and run as the root user
        conf.set("fs.defaultFS", "hdfs://192.168.255.128:9000");
        System.setProperty("HADOOP_USER_NAME", "root");
        //Local Hadoop home on Windows (the hadoop2.6 folder is shipped as hadoop2.6.rar in the original post)
        System.setProperty("hadoop.home.dir", "E:/hadoop2.6");
        final String OUTPUT_PATH="hdfs://192.168.255.128:9000/output";
        Path outpath = new Path(OUTPUT_PATH);

        //Delete any previous output so the job does not fail because the output path already exists
        FileSystem fs = FileSystem.get(new URI(OUTPUT_PATH),conf);
        if(fs.exists(outpath)){
            fs.delete(outpath,true);
        }

        //Create the job and give it a name
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount2.class);
        //Mapper class
        job.setMapperClass(TokenizerMapper.class);
        //Combiner class; the reducer can be reused here because summing partial counts is associative
        job.setCombinerClass(IntSumReducer.class);
        //Reducer class
        job.setReducerClass(IntSumReducer.class);
        //Output key type
        job.setOutputKeyClass(Text.class);
        //Output value type
        job.setOutputValueClass(IntWritable.class);

        //Input path on the remote HDFS
        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.255.128:9000/input"));
        //Output path on the remote HDFS
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.255.128:9000/output"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}

Copy hadoop.dll from the hadoop2.6/bin folder to C:\Windows\System32.
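
Running Hadoop code on Windows typically also requires winutils.exe, so the directory that hadoop.home.dir points to (E:/hadoop2.6 above) should contain at least:

    E:/hadoop2.6/bin/hadoop.dll
    E:/hadoop2.6/bin/winutils.exe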

Reposted from blog.csdn.net/Milan__Kundera/article/details/83753020