Reduce-Side Join Implementation

How it works:
Use the join condition (here, the product id pid) as the map output key, and tag each record that satisfies the join condition with the file it came from. Matching records from both tables are then routed to the same reduce task, where they are stitched back together.

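The original post does not show the input files. Judging from the field splits in the Mapper below, both inputs are comma-separated, and a record from each presumably looks something like the following (the concrete values here are made up purely for illustration):

order.txt   (id,date,pid,amount)          e.g.  1001,20150710,P0001,2
product.txt (pid,name,categoryId,price)   e.g.  P0001,xiaomi5,1000,2000

Both sample records carry pid P0001, so they end up in the same reduce call and can be joined there.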

Step 1: Define the Ord_Pro bean

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class Ord_Pro implements Writable {

    // one joined record: order fields (id, date, pid, amount) + product fields (name, categoryId, price)
    private String id;
    private String date;
    private String pid;
    private String amount;
    private String name;
    private String categoryId;
    private String price;

    @Override
    public String toString() {
        // note: pid is not included in the output
        return id + '\t' + date + '\t' + amount + '\t' + name + '\t' + categoryId + '\t' + price;
    }

    public Ord_Pro(String id, String date, String pid, String amount, String name, String categoryId, String price) {
        this.id = id;
        this.date = date;
        this.pid = pid;
        this.amount = amount;
        this.name = name;
        this.categoryId = categoryId;
        this.price = price;
    }

    public Ord_Pro() {
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getDate() {
        return date;
    }

    public void setDate(String date) {
        this.date = date;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public String getAmount() {
        return amount;
    }

    public void setAmount(String amount) {
        this.amount = amount;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getCategoryId() {
        return categoryId;
    }

    public void setCategoryId(String categoryId) {
        this.categoryId = categoryId;
    }

    public String getPrice() {
        return price;
    }

    public void setPrice(String price) {
        this.price = price;
    }

    // serialization: the field order here must match readFields() exactly;
    // the +"" guards against writeUTF(null) throwing a NullPointerException
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(id + "");
        dataOutput.writeUTF(date + "");
        dataOutput.writeUTF(pid + "");
        dataOutput.writeUTF(amount + "");
        dataOutput.writeUTF(name + "");
        dataOutput.writeUTF(categoryId + "");
        dataOutput.writeUTF(price + "");
    }

    // deserialization: read the fields back in the same order they were written
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.id = dataInput.readUTF();
        this.date = dataInput.readUTF();
        this.pid = dataInput.readUTF();
        this.amount = dataInput.readUTF();
        this.name = dataInput.readUTF();
        this.categoryId = dataInput.readUTF();
        this.price = dataInput.readUTF();
    }
}
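As a quick sanity check of the write/readFields pair (this snippet is not part of the original post, and the field values are the made-up samples from above), the bean can be serialized to a byte array and read back:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class Ord_ProRoundTrip {

    public static void main(String[] args) throws IOException {
        // hypothetical sample values, matching the assumed input layout
        Ord_Pro in = new Ord_Pro("1001", "20150710", "P0001", "2", "xiaomi5", "1000", "2000");

        // serialize with Writable#write ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bytes));

        // ... and read the fields back with Writable#readFields
        Ord_Pro out = new Ord_Pro();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(out);   // prints the same tab-separated fields
    }
}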

Step 2: Define the Mapper class

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class MapDemo01 extends Mapper<LongWritable, Text, Text, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // work out which input file this record came from
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        String[] split = value.toString().split(",");

        if (inputSplit.getPath().getName().contains("order")) {
            // order record (id,date,pid,amount): key on the pid, and tag the value with
            // a leading comma so the Reducer can tell it came from the order file
            context.write(new Text(split[2]), new Text("," + split[0] + "," + split[1] + "," + split[3]));
        } else {
            // product record (pid,name,categoryId,price): key on the pid, no tag
            context.write(new Text(split[0]), new Text(split[1] + "," + split[2] + "," + split[3]));
        }
    }
}
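For the hypothetical sample rows shown earlier, the Mapper would emit the following key/value pairs (the leading comma on the order value is what later lets the Reducer tell the two sources apart):

P0001    ,1001,20150710,2       (from order.txt: split[2] is the key)
P0001    xiaomi5,1000,2000      (from product.txt: split[0] is the key)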

Step 3: Define a custom Reducer class

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class ReduceDemo01 extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // stitch the order and product values for this pid back together
        String str = "";
        for (Text value : values) {
            String val = value.toString();
            if (!val.startsWith(",")) {
                // product record: prepend it so the product fields always come first,
                // regardless of the order in which the values are iterated
                str = val + str;
            } else {
                // order record (tagged with a leading comma in the Mapper): append it
                str = str + val;
            }
        }
        context.write(key, new Text(str));
    }
}
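The Ord_Pro bean from Step 1 is not actually used in the string-concatenation Reducer above. As a sketch (not from the original post), the reduce() method could instead fill the bean, assuming one product record and one or more order records per pid; it additionally needs java.util.ArrayList and java.util.List imports:

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        String name = "", categoryId = "", price = "";
        List<String[]> orders = new ArrayList<>();           // one String[]{id, date, amount} per order

        for (Text value : values) {
            String val = value.toString();
            if (val.startsWith(",")) {
                // order record: ",id,date,amount" -> split gives ["", id, date, amount]
                String[] f = val.split(",");
                orders.add(new String[]{f[1], f[2], f[3]});
            } else {
                // product record: "name,categoryId,price"
                String[] f = val.split(",");
                name = f[0];
                categoryId = f[1];
                price = f[2];
            }
        }

        // emit one joined record per order, using the key (pid) for the bean's pid field
        for (String[] o : orders) {
            Ord_Pro joined = new Ord_Pro(o[0], o[1], key.toString(), o[2], name, categoryId, price);
            context.write(key, new Text(joined.toString()));
        }
    }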

Step 4: The driver (program entry point)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class DriverDemo01 {

    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration, "reduce side join");

        job.setJarByClass(DriverDemo01.class);
        job.setMapperClass(MapDemo01.class);
        job.setReducerClass(ReduceDemo01.class);

        // map output and final output are both Text/Text
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // local paths for a local test run; the output directory must not already exist
        FileInputFormat.addInputPath(job, new Path("E:\\input"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\output"));

        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
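One common tweak (not in the original post) is to take the paths from the command line and delete a pre-existing output directory so that reruns do not fail. A minimal sketch, assuming args[0] and args[1] hold the input and output paths and adding an import of org.apache.hadoop.fs.FileSystem:

        // inside main(), replacing the two hard-coded path lines above
        FileSystem fs = FileSystem.get(configuration);
        Path output = new Path(args[1]);
        if (fs.exists(output)) {
            fs.delete(output, true);          // true = delete recursively
        }
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, output);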


Reposted from blog.csdn.net/tian_1_2_3/article/details/110394226