MapReduce Multi-Table Join

This article uses the merge of two tables, user.log and goods.log, as an example of a reduce-side join in MapReduce.
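
The record layouts below are assumptions inferred from the field order the mapper reads (the real files may differ): user.log is taken to hold userID,userName,userAge and goods.log to hold goodID,userID,goodPrice,ts. Illustrative sample data:

user.log:
u001,Tom,25
u002,Jerry,30

goods.log:
g001,u001,35.5,1527043200
g002,u001,12.0,1527046800
g003,u002,99.9,1527050400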

1. Write the Mapper class

class JoinMRMapper extends Mapper<LongWritable, Text, Text, Text> {
   @Override
   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

      /* Get the input split this record belongs to */
      FileSplit fileSplit = (FileSplit) context.getInputSplit();

      /* Get the name of the source file */
      String name = fileSplit.getPath().getName();
      String[] splits = value.toString().split(",");
      /* Decide whether this record comes from user.log or goods.log */
      if (name.equals("user.log")) {
         String userID = splits[0];
         String userName = splits[1];
         String userAge = splits[2];
         /* Prefix the value with the file name so the reduce phase can tell which log each record came from */
         context.write(new Text(userID), new Text(name + "-" + userName + "," + userAge));
      } else {
         String goodID = splits[0];
         String userID = splits[1];
         String goodPrice = splits[2];
         String ts = splits[3];
         context.write(new Text(userID), new Text(name + "-" + goodID + "," + goodPrice + "," + ts));
      }
   }
}
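
With the assumed sample records above, the mapper keys every record by userID and tags each value with its source file name, so the records for user u001 would arrive at the reducer roughly as:

u001    user.log-Tom,25
u001    goods.log-g001,35.5,1527043200
u001    goods.log-g002,12.0,1527046800
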
2. Write the Reducer class

class JoinMRReducer extends Reducer<Text, Text, Text, NullWritable> {
   @Override
   protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
      List<String> userList = new ArrayList<>();
      List<String> goodList = new ArrayList<>();
      for (Text t : values) {
         /* Read the file-name tag and add the payload to the matching list; limit the split to 2 so hyphens inside the payload are preserved */
         String[] splits = t.toString().split("-", 2);
         if (splits[0].equals("user.log")) {
            userList.add(splits[1]);
         } else {
            goodList.add(splits[1]);
         }
      }
      /* List sizes */
      int userLength = userList.size();
      int goodLength = goodList.size();
      for (int i = 0; i < userLength; i++) {
         for (int j = 0; j < goodLength; j++) {
            /* The key is the user ID; the nested loops join every user record with every goods record for that ID */
            String keyout = key.toString() + "," + userList.get(i) + "," + goodList.get(j);
            context.write(new Text(keyout), NullWritable.get());
         }
      }
   }
}
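
For key u001 the reducer strips the user.log/goods.log tag, collects one user record and two goods records, and the nested loops emit their Cartesian product (1 × 2 = 2 joined lines in this illustrative case):

u001,Tom,25,g001,35.5,1527043200
u001,Tom,25,g002,12.0,1527046800
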
3. Write the Driver class

public class JoinMR {
   public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
      /* Configuration */
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
      System.setProperty("HADOOP_USER_NAME", "hadoop");

      /* Create the job instance */
      Job job = Job.getInstance(conf);

      job.setJarByClass(JoinMR.class);

      /* Wire up the mapper and reducer */
      job.setMapperClass(JoinMRMapper.class);
      job.setReducerClass(JoinMRReducer.class);

      /* Key/value types of the map output */
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);

      /* Key/value types of the final output */
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);

      /* Optionally set the number of reduce tasks */
      // job.setNumReduceTasks(6);

      /* Input and output paths */
      Path inputUser = new Path("/hadoop/input/user.log");
      Path inputGoods = new Path("/hadoop/input/goods.log");
      Path output = new Path("/hadoop/output/user_goods");
      /* Delete the output directory if it already exists, otherwise the job would fail */
      FileSystem fs = FileSystem.get(conf);
      if (fs.exists(output)) {
         fs.delete(output, true);
      }

      /* addInputPath appends an input path, so it can be called once per file; setInputPaths would replace the whole list */
      FileInputFormat.addInputPath(job, inputUser);
      FileInputFormat.addInputPath(job, inputGoods);
      FileOutputFormat.setOutputPath(job, output);

      /* Wait for the job to finish and exit with its status */
      boolean b = job.waitForCompletion(true);
      System.exit(b ? 0 : 1);

   }
}
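
Once the three classes are packaged into a jar (the jar name below is only an assumption), the job can be submitted with the standard hadoop jar command:

hadoop jar join.jar join.JoinMR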

4. Required imports

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

5. Complete code

package join;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class JoinMR {
   public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
      /* Configuration */
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
      System.setProperty("HADOOP_USER_NAME", "hadoop");

      /* Create the job instance */
      Job job = Job.getInstance(conf);

      job.setJarByClass(JoinMR.class);

      /* Wire up the mapper and reducer */
      job.setMapperClass(JoinMRMapper.class);
      job.setReducerClass(JoinMRReducer.class);

      /* Key/value types of the map output */
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);

      /* Key/value types of the final output */
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);

      /* Optionally set the number of reduce tasks */
      // job.setNumReduceTasks(6);

      /* Input and output paths */
      Path inputUser = new Path("/hadoop/input/user.log");
      Path inputGoods = new Path("/hadoop/input/goods.log");
      Path output = new Path("/hadoop/output/user_goods");
      /* Delete the output directory if it already exists, otherwise the job would fail */
      FileSystem fs = FileSystem.get(conf);
      if (fs.exists(output)) {
         fs.delete(output, true);
      }

      /* addInputPath appends an input path, so it can be called once per file; setInputPaths would replace the whole list */
      FileInputFormat.addInputPath(job, inputUser);
      FileInputFormat.addInputPath(job, inputGoods);
      FileOutputFormat.setOutputPath(job, output);

      /* Wait for the job to finish and exit with its status */
      boolean b = job.waitForCompletion(true);
      System.exit(b ? 0 : 1);

   }
}

class JoinMRMapper extends Mapper<LongWritable, Text, Text, Text> {
   @Override
   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

      /* Get the input split this record belongs to */
      FileSplit fileSplit = (FileSplit) context.getInputSplit();

      /* Get the name of the source file */
      String name = fileSplit.getPath().getName();
      String[] splits = value.toString().split(",");
      /* Decide whether this record comes from user.log or goods.log */
      if (name.equals("user.log")) {
         String userID = splits[0];
         String userName = splits[1];
         String userAge = splits[2];
         /* Prefix the value with the file name so the reduce phase can tell which log each record came from */
         context.write(new Text(userID), new Text(name + "-" + userName + "," + userAge));
      } else {
         String goodID = splits[0];
         String userID = splits[1];
         String goodPrice = splits[2];
         String ts = splits[3];
         context.write(new Text(userID), new Text(name + "-" + goodID + "," + goodPrice + "," + ts));
      }
   }
}

class JoinMRReducer extends Reducer<Text, Text, Text, NullWritable> {
   @Override
   protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
      List<String> userList = new ArrayList<>();
      List<String> goodList = new ArrayList<>();
      for (Text t : values) {
         /* Read the file-name tag and add the payload to the matching list; limit the split to 2 so hyphens inside the payload are preserved */
         String[] splits = t.toString().split("-", 2);
         if (splits[0].equals("user.log")) {
            userList.add(splits[1]);
         } else {
            goodList.add(splits[1]);
         }
      }
      /* List sizes */
      int userLength = userList.size();
      int goodLength = goodList.size();
      for (int i = 0; i < userLength; i++) {
         for (int j = 0; j < goodLength; j++) {
            /* The key is the user ID; the nested loops join every user record with every goods record for that ID */
            String keyout = key.toString() + "," + userList.get(i) + "," + goodList.get(j);
            context.write(new Text(keyout), NullWritable.get());
         }
      }
   }
}

6. Run output

18/05/23 11:18:14 INFO mapreduce.Job:  map 100% reduce 0%
18/05/23 11:18:14 INFO mapred.Task:  Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@118b3f88
18/05/23 11:18:14 INFO mapred.ReduceTask: Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@2580b60e
18/05/23 11:18:14 INFO reduce.MergeManagerImpl: MergerManager: memoryLimit=1294781568, maxSingleShuffleLimit=323695392, mergeThreshold=854555840, ioSortFactor=10, memToMemMergeOutputsThreshold=10
18/05/23 11:18:14 INFO reduce.EventFetcher: attempt_local1763534138_0001_r_000000_0 Thread started: EventFetcher for fetching Map Completion Events
18/05/23 11:18:14 INFO reduce.LocalFetcher: localfetcher#1 about to shuffle output of map attempt_local1763534138_0001_m_000001_0 decomp: 218030 len: 218034 to MEMORY
18/05/23 11:18:14 INFO reduce.InMemoryMapOutput: Read 218030 bytes from map-output for attempt_local1763534138_0001_m_000001_0
18/05/23 11:18:14 INFO reduce.MergeManagerImpl: closeInMemoryFile -> map-output of size: 218030, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->218030
18/05/23 11:18:14 INFO reduce.LocalFetcher: localfetcher#1 about to shuffle output of map attempt_local1763534138_0001_m_000000_0 decomp: 37596850 len: 37596854 to MEMORY
18/05/23 11:18:14 INFO reduce.InMemoryMapOutput: Read 37596850 bytes from map-output for attempt_local1763534138_0001_m_000000_0
18/05/23 11:18:14 INFO reduce.MergeManagerImpl: closeInMemoryFile -> map-output of size: 37596850, inMemoryMapOutputs.size() -> 2, commitMemory -> 218030, usedMemory ->37814880
18/05/23 11:18:14 INFO reduce.EventFetcher: EventFetcher is interrupted.. Returning
18/05/23 11:18:14 INFO mapred.LocalJobRunner: 2 / 2 copied.
18/05/23 11:18:14 INFO reduce.MergeManagerImpl: finalMerge called with 2 in-memory map-outputs and 0 on-disk map-outputs
18/05/23 11:18:14 INFO mapred.Merger: Merging 2 sorted segments
18/05/23 11:18:14 INFO mapred.Merger: Down to the last merge-pass, with 2 segments left of total size: 37814872 bytes
18/05/23 11:18:15 INFO reduce.MergeManagerImpl: Merged 2 segments, 37814880 bytes to disk to satisfy reduce memory limit
18/05/23 11:18:15 INFO reduce.MergeManagerImpl: Merging 1 files, 37814882 bytes from disk
18/05/23 11:18:15 INFO reduce.MergeManagerImpl: Merging 0 segments, 0 bytes from memory into reduce
18/05/23 11:18:15 INFO mapred.Merger: Merging 1 sorted segments
18/05/23 11:18:15 INFO mapred.Merger: Down to the last merge-pass, with 1 segments left of total size: 37814874 bytes
18/05/23 11:18:15 INFO mapred.LocalJobRunner: 2 / 2 copied.
18/05/23 11:18:15 INFO Configuration.deprecation: mapred.skip.on is deprecated. Instead, use mapreduce.job.skiprecords
18/05/23 11:18:20 INFO mapred.LocalJobRunner: reduce > reduce
18/05/23 11:18:20 INFO mapreduce.Job:  map 100% reduce 90%
18/05/23 11:18:22 INFO mapred.Task: Task:attempt_local1763534138_0001_r_000000_0 is done. And is in the process of committing
18/05/23 11:18:22 INFO mapred.LocalJobRunner: reduce > reduce
18/05/23 11:18:22 INFO mapred.Task: Task attempt_local1763534138_0001_r_000000_0 is allowed to commit now
18/05/23 11:18:22 INFO output.FileOutputCommitter: Saved output of task 'attempt_local1763534138_0001_r_000000_0' to hdfs://hadoop01:9000/hadoop/output/movie1/_temporary/0/task_local1763534138_0001_r_000000
18/05/23 11:18:22 INFO mapred.LocalJobRunner: reduce > reduce
18/05/23 11:18:22 INFO mapred.Task: Task 'attempt_local1763534138_0001_r_000000_0' done.
18/05/23 11:18:22 INFO mapred.LocalJobRunner: Finishing task: attempt_local1763534138_0001_r_000000_0
18/05/23 11:18:22 INFO mapred.LocalJobRunner: reduce task executor complete.
18/05/23 11:18:22 INFO mapreduce.Job:  map 100% reduce 100%
18/05/23 11:18:22 INFO mapreduce.Job: Job job_local1763534138_0001 completed successfully
18/05/23 11:18:22 INFO mapreduce.Job: Counters: 38
File System Counters
FILE: Number of bytes read=75631291
FILE: Number of bytes written=151812342
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=74125257
HDFS: Number of bytes written=64365925
HDFS: Number of read operations=31
HDFS: Number of large read operations=0
HDFS: Number of write operations=8
Map-Reduce Framework
Map input records=1004092
Map output records=1004092
Map output bytes=35806692
Map output materialized bytes=37814888
Input split bytes=219
Combine input records=0
Combine output records=0
Reduce input groups=3883
Reduce shuffle bytes=37814888
Reduce input records=1004092
Reduce output records=1000209
Spilled Records=2008184
Shuffled Maps =2
Failed Shuffles=0
Merged Map outputs=2
GC time elapsed (ms)=59
CPU time spent (ms)=0
Physical memory (bytes) snapshot=0
Virtual memory (bytes) snapshot=0
Total committed heap usage (bytes)=1905786880
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters 
Bytes Read=24765563
File Output Format Counters 
Bytes Written=64365925

Reprinted from blog.csdn.net/lukabruce/article/details/80415831