In day-to-day work you sometimes run into situations where several MapReduce programs depend on one another and must run in sequence. The ControlledJob class makes it easy to run multiple jobs in a prescribed order.
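The essential pattern is only a few calls; a minimal sketch follows (here job1 and job2 stand for two already-configured Job instances; the full runnable listing appears in the Code section below):

    // Minimal sketch of the ControlledJob pattern, assuming job1 and job2
    // are already-configured org.apache.hadoop.mapreduce.Job instances.
    ControlledJob ctrljob1 = new ControlledJob(conf);
    ctrljob1.setJob(job1);
    ControlledJob ctrljob2 = new ControlledJob(conf);
    ctrljob2.setJob(job2);
    ctrljob2.addDependingJob(ctrljob1);   // job2 starts only after job1 succeeds

    JobControl jobCtrl = new JobControl("myctrl");
    jobCtrl.addJob(ctrljob1);
    jobCtrl.addJob(ctrljob2);
    new Thread(jobCtrl).start();          // JobControl is a Runnable; run it in a thread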
There are two input files, t1.txt and t2.txt, with the following contents:
t1:
a
b
a
c
d
a
b
t2:
c
a
e
d
b
a
e
c
c
Run the following command:
hadoop jar /home/sospdm/morejobs.jar morejobs.morejobs input output output1
This produces two output directories, output and output1, with the following contents:
[sospdm@master2-dev ~]$ hadoop fs -cat output/part-r-00000
a 5
b 3
c 4
d 2
e 2
[sospdm@master2-dev ~]$ hadoop fs -cat output1/part-r-00000
sum 16
The first MapReduce program computes the per-word counts; the second takes the first program's output directory as its input and computes the total count.
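The second job's mapper extracts the count from each "word<TAB>count" line emitted by the first job by matching the trailing digits. As a quick standalone illustration of that parsing step (a sketch, not part of the job itself):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ParseDemo {
        public static void main(String[] args) {
            // The first job writes lines like "a\t5"; the count is the
            // run of digits at the end of the line.
            Pattern p = Pattern.compile("\\d{1,10}$");
            Matcher m = p.matcher("a\t5");
            if (m.find()) {
                System.out.println(Integer.parseInt(m.group())); // prints 5
            }
        }
    }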
Code:
package morejobs;

import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class morejobs {

    /*************** First MapReduce job: word count
     *
     * @author zhangliang
     */
    // Mapper for the first job: emit (word, 1) for every input line
    public static class Map_First extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text keys = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            keys.set(value.toString());
            context.write(keys, one);
        }
    }

    // Reducer for the first job: sum the 1s emitted for each word
    public static class Reduce_First extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    /*************** Second MapReduce job: sum the per-word counts
     *
     * @author 11100198
     */
    // Mapper for the second job: parse the count out of each "word<TAB>count"
    // line produced by the first job and emit it under the single key "sum"
    public static class Map_Second extends Mapper<Object, Text, Text, IntWritable> {
        private final IntWritable count = new IntWritable();
        private final Text keys = new Text("sum");

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // the count is the run of digits at the end of the line
            Pattern p = Pattern.compile("\\d{1,10}$");
            Matcher m = p.matcher(value.toString());
            if (m.find()) {
                count.set(Integer.parseInt(m.group()));
                context.write(keys, count);
            }
        }
    }

    // Reducer for the second job: add up all the counts
    public static class Reduce_Second extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    /**********************
     * The job configuration below follows http://www.tuicool.com/articles/N7buuy
     * @param args
     * @throws Exception
     */
    // Driver
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // configuration for the first job
        Job job1 = new Job(conf, "join1");
        job1.setJarByClass(morejobs.class);
        job1.setMapperClass(Map_First.class);
        job1.setReducerClass(Reduce_First.class);
        job1.setMapOutputKeyClass(Text.class);          // map output key type
        job1.setMapOutputValueClass(IntWritable.class); // map output value type
        job1.setOutputKeyClass(Text.class);             // reduce output key type
        job1.setOutputValueClass(IntWritable.class);    // reduce output value type

        // wrap job1 in a controlled job
        ControlledJob ctrljob1 = new ControlledJob(conf);
        ctrljob1.setJob(job1);

        // input and output paths for job1
        FileInputFormat.addInputPath(job1, new Path(args[0]));
        FileOutputFormat.setOutputPath(job1, new Path(args[1]));

        // configuration for the second job
        Job job2 = new Job(conf, "Join2");
        job2.setJarByClass(morejobs.class);
        job2.setMapperClass(Map_Second.class);
        job2.setReducerClass(Reduce_Second.class);
        job2.setMapOutputKeyClass(Text.class);          // map output key type
        job2.setMapOutputValueClass(IntWritable.class); // map output value type
        job2.setOutputKeyClass(Text.class);             // reduce output key type
        job2.setOutputValueClass(IntWritable.class);    // reduce output value type

        // wrap job2 in a controlled job
        ControlledJob ctrljob2 = new ControlledJob(conf);
        ctrljob2.setJob(job2);

        // declare the dependency between the two jobs:
        // job2 will not start until job1 has completed successfully
        ctrljob2.addDependingJob(ctrljob1);

        // job2's input is job1's output directory, hence args[1];
        // the two must match
        FileInputFormat.addInputPath(job2, new Path(args[1]));
        // the final output directory must not already exist, so a fresh
        // argument, args[2], is passed in here; any unused path will do
        FileOutputFormat.setOutputPath(job2, new Path(args[2]));

        // the master controller that drives the two sub-jobs
        JobControl jobCtrl = new JobControl("myctrl");
        jobCtrl.addJob(ctrljob1);
        jobCtrl.addJob(ctrljob2);

        // JobControl is a Runnable: it must be started in its own thread
        Thread t = new Thread(jobCtrl);
        t.start();

        // poll until both jobs have finished, then report the successful
        // jobs and stop the controller
        while (true) {
            if (jobCtrl.allFinished()) {
                System.out.println(jobCtrl.getSuccessfulJobList());
                jobCtrl.stop();
                break;
            }
            Thread.sleep(500); // avoid busy-waiting
        }
    }
}
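When the dependency chain is strictly linear, as here, a simpler alternative (a sketch under the same job setup as above, not the approach used in the listing) is to skip JobControl and run the jobs back-to-back with waitForCompletion:

    // Sketch: sequential chaining without JobControl, assuming job1 and
    // job2 are configured exactly as in the listing above.
    if (!job1.waitForCompletion(true)) {   // blocks until job1 finishes
        System.exit(1);                    // abort if the first job failed
    }
    System.exit(job2.waitForCompletion(true) ? 0 : 1);

JobControl earns its keep when the dependency graph has branches, since independent jobs can then run concurrently instead of strictly one after another.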