Hadoop Learning 2: Basic Usage of DistributedCache

Some basic usage of DistributedCache.

Debug code: WordCount2.java

import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class WordCount2 extends Configured implements Tool {
	
	static Logger log = Logger.getLogger(WordCount2.class);
	
	public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
		
		static enum Counters {
			INPUT_WORDS
		}

		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();

		private boolean caseSensitive = true;	// whether word matching is case-sensitive
		private Set<String> patternsToSkip = new HashSet<String>();		// regular expressions to strip from the input

		private long numRecords = 0;	// number of records processed
		private String inputFile;

		public void configure(JobConf job) {
			caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
			inputFile = job.get("map.input.file");
			
			log.info("caseSensitive:" + job.get("wordcount.case.sensitive")
						+ "		inputFile:" + inputFile 
						+ "		patterns:" + job.get("wordcount.skip.patterns"));

			if (job.getBoolean("wordcount.skip.patterns", false)) {
				log.info("传入参数wordcount.skip.patterns");
				Path[] patternsFiles = new Path[0];
				try {
//					patternsFiles[0] = DistributedCache.getCacheFiles(job);	// read the pattern file paths (passed via configuration parameters)
					
					URI[] uris = DistributedCache.getCacheFiles(job);
					patternsFiles = new Path[uris.length];
					for(int i = 0; i < uris.length; i++){
						Path path = new Path(uris[i].toString());
//						Path path = new Path("D:/patterns.txt");
						patternsFiles[i] = path;
					}
//					log.info(uris[0].toString());
					
//					patternsFiles = DistributedCache.getLocalCacheFiles(job);
//					log.info(patternsFiles.length);
				} catch (IOException ioe) {
					System.err.println("Caught exception while getting cached files: "
									+ StringUtils.stringifyException(ioe));
				}
				for (Path patternsFile : patternsFiles) {
					parseSkipFile(patternsFile);
				}
			}
		}
		
		// extract the regular expressions from the patterns file
		private void parseSkipFile(Path patternsFile) {
			log.info("extracting regular expressions from the patterns file");
			try {
//				BufferedReader fis = new BufferedReader(new FileReader(patternsFile.toString()));
//				BufferedReader fis = new BufferedReader(new FileReader("hdfs://192.168.100.228:9000/temp/p.dat"));
				String pattern = null;
//				while ((pattern = fis.readLine()) != null) {
//					log.info("pattern: " + pattern);
//					patternsToSkip.add(pattern);
//				}
				// read the patterns file from HDFS
				Configuration conf = new Configuration();
				FileSystem fs = FileSystem.get(patternsFile.toUri(), conf);
				FSDataInputStream hdfsInStream = fs.open(patternsFile);
				String s = "";
				while (s != null) {
					s = hdfsInStream.readLine();
					if (s != null) {
						System.out.println(s);
						patternsToSkip.add(s);
					}
				}
				hdfsInStream.close();
//				fs.close();
				log.info("pattern list: " + patternsToSkip);
				  
			} catch (IOException ioe) {
				System.err.println("Caught exception while parsing the cached file '"
								+ patternsFile
								+ "' : "
								+ StringUtils.stringifyException(ioe));
			}
		}

		public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			log.info("map 线程id:" + Thread.currentThread().getId());
			
			String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();

			for (String pattern : patternsToSkip) {
				line = line.replaceAll(pattern, "");
			}

			StringTokenizer tokenizer = new StringTokenizer(line);
			while (tokenizer.hasMoreTokens()) {
				word.set(tokenizer.nextToken());
				output.collect(word, one);
				reporter.incrCounter(Counters.INPUT_WORDS, 1);
			}

			if ((++numRecords % 100) == 0) {
				reporter.setStatus("Finished processing " + numRecords
						+ " records " + "from the input file: " + inputFile);
			}
		}
	}

	public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
		public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			log.info("reduce 线程id:" + Thread.currentThread().getId());
			int sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}
			output.collect(key, new IntWritable(sum));
		}
	}

	public int run(String[] args) throws Exception {
		JobConf conf = new JobConf(getConf(), WordCount2.class);
		conf.setJobName("wordcount");

		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(IntWritable.class);

		conf.setMapperClass(Map.class);
		conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);

		conf.setInputFormat(TextInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		// set the patterns file path
		DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf);	// add an HDFS file path to the DistributedCache
		conf.setBoolean("wordcount.skip.patterns", true);
		
		
//		List<String> other_args = new ArrayList<String>();
//		for (int i = 0; i < args.length; ++i) {
//			if ("-skip".equals(args[i])) {
//				DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
//				conf.setBoolean("wordcount.skip.patterns", true);
//			} else {
//				other_args.add(args[i]);
//			}
//		}

		FileInputFormat.setInputPaths(conf, new Path("/temp/in2"));
		FileOutputFormat.setOutputPath(conf, new Path("/temp/out-" + String.valueOf(System.currentTimeMillis())));

		JobClient.runJob(conf);
		return 0;
	}

	public static void main(String[] args) throws Exception {
		int res = ToolRunner.run(new Configuration(), new WordCount2(), args);
		System.exit(res);
	}

}

The two key calls:

DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf); // add an HDFS file path to the DistributedCache

URI[] uris = DistributedCache.getCacheFiles(job); // read the cached file URIs back out in the task (passed via the job configuration)

show the basic usage of DistributedCache.

With DistributedCache you can register a URI path before the job executes; inside map or reduce tasks, the DistributedCache.get*() methods return the file behind that path, which can be a plain document, a jar, and so on. DistributedCache can also add a jar directly to the task classpath, which makes it convenient to use third-party libraries. Both patterns are sketched below.
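
A minimal sketch of the two patterns, written as extra static methods inside WordCount2 so it reuses the imports and the log field from the listing above; the jar path /temp/lib/thirdparty.jar and the helper method names are placeholders, not from the original post:

	// Client side: register a data file and, optionally, put a third-party jar on the task classpath.
	public static void registerCacheFiles(JobConf conf) throws Exception {
		DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf);						// plain file
		DistributedCache.addFileToClassPath(new Path("/temp/lib/thirdparty.jar"), conf);	// hypothetical jar
	}

	// Task side (e.g. called from configure(JobConf job)): look the cached files up again.
	public static void logCacheFiles(JobConf job) {
		try {
			URI[] uris = DistributedCache.getCacheFiles(job);				// the original HDFS URIs
			Path[] localFiles = DistributedCache.getLocalCacheFiles(job);	// local copies on the task node
			if (uris != null) {
				for (URI uri : uris) {
					log.info("cache URI: " + uri);
				}
			}
			if (localFiles != null) {	// can be null in some local-runner setups
				for (Path p : localFiles) {
					log.info("local cache copy: " + p);
				}
			}
		} catch (IOException ioe) {
			log.warn("could not read DistributedCache files", ioe);
		}
	}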

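For reference, the cached file /temp/p.dat is a plain text file with one regular expression per line; judging from the pattern list printed in the log below, it contained roughly:

\.
\,
\!
to
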
Output log:

12/02/09 17:28:50 INFO jvm.JvmMetrics: Initializing JVM Metrics with processName=JobTracker, sessionId=
12/02/09 17:28:51 INFO mapred.FileInputFormat: Total input paths to process : 2
12/02/09 17:28:51 INFO mapred.JobClient: Running job: job_local_0001
12/02/09 17:28:51 INFO mapred.FileInputFormat: Total input paths to process : 2
12/02/09 17:28:51 INFO mapred.MapTask: numReduceTasks: 1
12/02/09 17:28:51 INFO mapred.MapTask: io.sort.mb = 100
12/02/09 17:28:51 INFO mapred.MapTask: data buffer = 79691776/99614720
12/02/09 17:28:51 INFO mapred.MapTask: record buffer = 262144/327680
12/02/09 17:28:51 INFO test.WordCount2: caseSensitive:null		inputFile:hdfs://localhost:9000/temp/in2/t1.txt		patterns:true
12/02/09 17:28:51 INFO test.WordCount2: received parameter wordcount.skip.patterns
12/02/09 17:28:51 INFO test.WordCount2: extracting regular expressions from the patterns file
12/02/09 17:28:51 INFO test.WordCount2: pattern list: [\! , \, , \. , to ]
\. 
\, 
\! 
to 
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Starting flush of map output
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Finished spill 0
12/02/09 17:28:51 INFO mapred.TaskRunner: Task:attempt_local_0001_m_000000_0 is done. And is in the process of commiting
12/02/09 17:28:51 INFO mapred.LocalJobRunner: hdfs://localhost:9000/temp/in2/t1.txt:0+52
12/02/09 17:28:51 INFO mapred.TaskRunner: Task 'attempt_local_0001_m_000000_0' done.
12/02/09 17:28:51 INFO mapred.MapTask: numReduceTasks: 1
12/02/09 17:28:51 INFO mapred.MapTask: io.sort.mb = 100
12/02/09 17:28:51 INFO mapred.MapTask: data buffer = 79691776/99614720
12/02/09 17:28:51 INFO mapred.MapTask: record buffer = 262144/327680
12/02/09 17:28:51 INFO test.WordCount2: caseSensitive:null		inputFile:hdfs://localhost:9000/temp/in2/t2.txt		patterns:true
12/02/09 17:28:51 INFO test.WordCount2: received parameter wordcount.skip.patterns
12/02/09 17:28:51 INFO test.WordCount2: extracting regular expressions from the patterns file
\. 
\, 
\! 
to 
12/02/09 17:28:51 INFO test.WordCount2: pattern list: [\! , \, , \. , to ]
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Starting flush of map output
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO mapred.MapTask: Finished spill 0
12/02/09 17:28:52 INFO mapred.TaskRunner: Task:attempt_local_0001_m_000001_0 is done. And is in the process of commiting
12/02/09 17:28:52 INFO mapred.LocalJobRunner: hdfs://localhost:9000/temp/in2/t2.txt:0+35
12/02/09 17:28:52 INFO mapred.TaskRunner: Task 'attempt_local_0001_m_000001_0' done.
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO mapred.Merger: Merging 2 sorted segments
12/02/09 17:28:52 INFO mapred.Merger: Down to the last merge-pass, with 2 segments left of total size: 184 bytes
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO mapred.TaskRunner: Task:attempt_local_0001_r_000000_0 is done. And is in the process of commiting
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO mapred.TaskRunner: Task attempt_local_0001_r_000000_0 is allowed to commit now
12/02/09 17:28:52 INFO mapred.FileOutputCommitter: Saved output of task 'attempt_local_0001_r_000000_0' to hdfs://localhost:9000/temp/out-1328779730906
12/02/09 17:28:52 INFO mapred.LocalJobRunner: reduce > reduce
12/02/09 17:28:52 INFO mapred.TaskRunner: Task 'attempt_local_0001_r_000000_0' done.
12/02/09 17:28:52 INFO mapred.JobClient:  map 100% reduce 100%
12/02/09 17:28:52 INFO mapred.JobClient: Job complete: job_local_0001
12/02/09 17:28:52 INFO mapred.JobClient: Counters: 16
12/02/09 17:28:52 INFO mapred.JobClient:   FileSystemCounters
12/02/09 17:28:52 INFO mapred.JobClient:     FILE_BYTES_READ=67623
12/02/09 17:28:52 INFO mapred.JobClient:     HDFS_BYTES_READ=63479
12/02/09 17:28:52 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=64858
12/02/09 17:28:52 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=131732
12/02/09 17:28:52 INFO mapred.JobClient:   com.hadoop.test.WordCount2$Map$Counters
12/02/09 17:28:52 INFO mapred.JobClient:     INPUT_WORDS=16
12/02/09 17:28:52 INFO mapred.JobClient:   Map-Reduce Framework
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce input groups=16
12/02/09 17:28:52 INFO mapred.JobClient:     Combine output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Map input records=5
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce shuffle bytes=0
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Spilled Records=32
12/02/09 17:28:52 INFO mapred.JobClient:     Map output bytes=148
12/02/09 17:28:52 INFO mapred.JobClient:     Map input bytes=87
12/02/09 17:28:52 INFO mapred.JobClient:     Combine input records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Map output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce input records=16

The test data consists of two files, so two map tasks were run. As the log shows, each task loads the configuration once, which is why the patterns file was read twice.

Reposted from goon.iteye.com/blog/1400201