In MapReduce, the key-value pairs emitted by the map phase are grouped by key and then dispatched to the reduce tasks. The default dispatch rule is: key.hashCode() % numReduceTasks. So if we want the data grouped according to our own requirements, we have to replace the dispatch (grouping) component, the Partitioner.
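For reference, the built-in default, org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, implements exactly this rule. A minimal sketch of its core logic (the bitmask keeps a negative hashCode() from producing a negative partition number):

import org.apache.hadoop.mapreduce.Partitioner;

public class HashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}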
To customize the grouping:

- Define a CustomPartitioner that extends the abstract class Partitioner.
- Then register it on the Job object: job.setPartitionerClass(CustomPartitioner.class).
The custom Partitioner class:
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Custom partitioner class.
 * @author lyd
 *
 * Notes:
 * 1. The class must extend Partitioner.
 * 2. The partitioner's generic types must match the map output (reduce input) types.
 * 3. getPartition() can only return an int.
 * 4. The number of partitions must equal the number of reduce tasks.
 * 5. Prefer computing the return value with the modulo operator (%), adapted to the business logic.
 * 6. By default, HashPartitioner is used.
 */
public class MyPartitioner extends Partitioner<Text, Text> {

    @Override
    public int getPartition(Text key, Text value, int numPartitions) {
        String firstChar = key.toString().substring(0, 1);
        // Route each key by its first character.
        if (firstChar.matches("[a-z]")) {
            return 1 % numPartitions;
        } else if (firstChar.matches("[A-Z]")) {
            return 2 % numPartitions;
        } else if (firstChar.matches("[0-9]")) {
            return 3 % numPartitions;
        } else {
            return 4 % numPartitions;
        }
    }
}
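With numPartitions set to 4 in the driver below, keys starting with a lowercase letter land in partition 1, uppercase in partition 2, digits in partition 3, and everything else in partition 0 (since 4 % 4 == 0), i.e. in the output files part-r-00001, part-r-00002, part-r-00003, and part-r-00000 respectively.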
The complete demo: PartitionerDemo
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Partitioning demo.
 * @author lyd
 *
 * Sample input:
 * hello world
 * hi qianfeng
 * Hi qianfeng
 * Hello Qianfeng
 * QQ
 * 163.com
 * 1603
 * @qq.com
 * *123
 * *123
 * (123)
 *
 * Words whose first character is a-z go into one output file, with counts.
 * Words whose first character is A-Z go into one output file, with counts.
 * Words whose first character is 0-9 go into one output file, with counts.
 * Words whose first character is anything else go into one output file, with counts.
 *
 * Task:
 * lh db
 * zyt zy
 * ls zy
 * hgw jc
 * yxx hd
 * hz hb
 * xyd hb
 * hj hb
 * cs hb
 */
public class PartitionerDemo {
    // Custom mapper
    public static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
        // Runs exactly once, before the first call to map().
        @Override
        protected void setup(Context context)
                throws IOException, InterruptedException {
        }

        Text k = new Text();
        Text v = new Text("1");

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer st = new StringTokenizer(line);
            while (st.hasMoreTokens()) {
                // Emit each word with a count of "1".
                k.set(st.nextToken());
                context.write(k, v);
            }
        }

        // Runs exactly once, after the last call to map().
        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
        }
    }
    // Custom reducer
    public static class MyReducer extends Reducer<Text, Text, Text, Text> {
        // Runs exactly once, before the first call to reduce().
        @Override
        protected void setup(Context context)
                throws IOException, InterruptedException {
        }

        Text v = new Text();

        @Override
        protected void reduce(Text key, Iterable<Text> value, Context context)
                throws IOException, InterruptedException {
            // Sum the "1"s emitted by the mapper for this key.
            int counter = 0;
            for (Text t : value) {
                counter += Integer.parseInt(t.toString());
            }
            v.set(counter + "");
            context.write(key, v);
        }

        // Runs exactly once, after the last call to reduce().
        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
        }
    }
    /**
     * Driver method for the job.
     * @param args input path and output path
     */
    public static void main(String[] args) {
        try {
            // 1. Build the configuration
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
            // 2. Create the job
            Job job = Job.getInstance(conf, "model01");
            // 3. Set the class that carries the job
            job.setJarByClass(PartitionerDemo.class);
            // 4. Configure the map side
            job.setMapperClass(MyMapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            FileInputFormat.addInputPath(job, new Path(args[0]));
            // Configure the partitioner
            job.setPartitionerClass(MyPartitioner.class);
            job.setNumReduceTasks(4); // Number of reduce tasks; must match the number of partitions.
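            // Note: if getPartition() returns a value >= the number of reduce tasks,
            // the job fails with an "Illegal partition" error. With more reduce tasks
            // than partitions actually used, the extra output files are simply empty,
            // and with a single reduce task the partitioner is never invoked at all.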
            // 5. Configure the reduce side
            job.setReducerClass(MyReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            // Delete the output directory if it already exists
            FileSystem fs = FileSystem.get(conf);
            if (fs.exists(new Path(args[1]))) {
                fs.delete(new Path(args[1]), true);
            }
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            // 6. Submit the job and wait for it to finish
            int isok = job.waitForCompletion(true) ? 0 : 1;
            // Exit with the job's status code
            System.exit(isok);
        } catch (IOException | ClassNotFoundException | InterruptedException e) {
            e.printStackTrace();
        }
    }
}
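To run the demo, package it into a jar and pass the input and output paths as arguments; the jar name and paths below are placeholders:

hadoop jar partitioner-demo.jar PartitionerDemo /input/words /output/partitioned

Given the sample input in the class comment, the four output files should split roughly as follows: part-r-00000 holds the "other" words ((123), *123, @qq.com), part-r-00001 the lowercase words (hello, hi, qianfeng, world), part-r-00002 the uppercase words (Hello, Hi, QQ, Qianfeng), and part-r-00003 the digit-led words (1603, 163.com), each with its count.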