14. Remote Debugging of MapReduce Programs with Eclipse on Windows 10

1. First, copy the Hadoop installation package from the Linux server to some location on Windows 10 and extract it, e.g. F:\programming\hadoop-2.8.5

2. Download winutils.exe and the hadoop.dll dynamic link library from the Internet. Put winutils.exe into Hadoop's bin directory (not sbin), and put hadoop.dll into the System32 folder on the Windows 10 system drive.

3. Configure the Hadoop environment variables on Windows 10: set HADOOP_HOME and add %HADOOP_HOME%\bin to PATH. It must be bin, not sbin; newer Hadoop releases are configured with sbin on Linux, but here Windows has to find the winutils.exe tool under bin.
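If Eclipse was launched before the environment variable took effect, the Hadoop client may still fail to find winutils.exe. A minimal workaround sketch is to set the hadoop.home.dir system property at the very start of main(), before any Hadoop classes are used; the path below is just the example location from step 1 and should be changed to your own install directory:

//Sketch: point the Hadoop client at the unpacked distribution so it can locate bin\winutils.exe
//(the path is an assumption based on the example location above)
System.setProperty("hadoop.home.dir", "F:\\programming\\hadoop-2.8.5");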

4. Open a cmd window and type: hadoop

[Screenshot: the hadoop command prints its usage information]
Succeeded!

5. Run the MapReduce program in Eclipse on Windows 10. Before doing so, it is worth copying the log4j.properties file from hadoop-2.8.5/etc/hadoop into the src directory of your Eclipse project so that log output shows up properly in the console.
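If you do not want to copy the whole file, a minimal console-only configuration along the following lines is usually enough (a sketch modeled on the distribution's defaults, not its exact contents):

log4j.rootLogger=INFO, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n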

package com.ljj.MR;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

	//Inner Mapper class: splits each line of input into words
	public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>
	{
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();

		//Implement the map() function
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException
		{
			//Split the line into words
			StringTokenizer itr = new StringTokenizer(value.toString());
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());		//store the current token in the word object
				context.write(word, one);		//collect the <key, value> pair
			}
		}
	}

	//Inner Reducer class: sums the values that share the same key
	public static class IntSumReducer extends Reducer<Text,IntWritable,Text,IntWritable> 
	{
		private IntWritable result = new IntWritable();
		//Implement the reduce() function
		public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
			int sum = 0;
			//Iterate over values to accumulate every count emitted for this key
			for (IntWritable val : values) {
				sum += val.get();
			}
			result.set(sum);
			context.write(key, result);//emit the output pair <key, result>
		}
	}

	public static void main(String[] args) throws Exception {
		//Set up the configuration for the job
		Configuration conf = new Configuration();
		//In Hadoop a computation task is called a Job; the main method creates a Job object and assigns its mapper and reducer classes as well as the input and output paths
		Job job = Job.getInstance(conf, "word count");//create a user-defined Job
		job.setJarByClass(WordCount.class);//set the jar that carries the task
		job.setMapperClass(TokenizerMapper.class);//set the Mapper class
		job.setCombinerClass(IntSumReducer.class);//set the Combiner class
		job.setReducerClass(IntSumReducer.class);//set the Reducer class
		job.setOutputKeyClass(Text.class);//set the job's output key type
		job.setOutputValueClass(IntWritable.class);//set the job's output value type
		FileInputFormat.addInputPath(job, new Path("hdfs://master:8020/tmp/wc/input"));//set the input path --------change this------------
		FileOutputFormat.setOutputPath(job, new Path("hdfs://master:8020/tmp/wc/output2"));//set the output path ------change this--------
		System.exit(job.waitForCompletion(true) ? 0 : 1);//submit the job and wait for it to finish
	}
}

The program above is the official example program from the Hadoop source; I only changed the input and output paths.
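In the official version the paths are read from the command line instead of being hard-coded. If you prefer that, a sketch of the argument handling (using Hadoop's GenericOptionsParser; how you pass the arguments from Eclipse's Run Configuration is up to you) looks like this:

import org.apache.hadoop.util.GenericOptionsParser;

//Inside main(), after creating conf and job: take the paths from the program arguments
String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
if (otherArgs.length < 2) {
	System.err.println("Usage: wordcount <in> <out>");
	System.exit(2);
}
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));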

6. When you run the program it may throw an exception like the one below, because the job accesses HDFS under your local Windows user name (Administrator in the trace), which has no write permission there. Normally, HDFS directories are writable only by their owner and read-only for everyone else. If you just want to save trouble during testing, run: hdfs dfs -chmod 777 hdfsFilePath, which opens the directory to all users (a less intrusive alternative is sketched after the stack trace below).

org.apache.hadoop.security.AccessControlException: Permission denied: user=Administrator, access=WRITE, inode="/tmp/wc":ljj:supergroup:drwxr-xr-x
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:318)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:219)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:189)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1663)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1647)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1606)
	at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3041)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1079)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:652)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:447)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:989)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:850)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:793)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1844)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2489)

	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121)
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88)
	at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2474)
	at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2447)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1159)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1156)
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1156)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1148)
	at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1914)
	at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.setupJob(FileOutputCommitter.java:343)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:538)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=Administrator, access=WRITE, inode="/tmp/wc":ljj:supergroup:drwxr-xr-x
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:318)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:219)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:189)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1663)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1647)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1606)
	at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3041)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1079)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:652)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:447)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:989)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:850)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:793)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1844)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2489)

	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1489)
	at org.apache.hadoop.ipc.Client.call(Client.java:1435)
	at org.apache.hadoop.ipc.Client.call(Client.java:1345)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:227)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
	at com.sun.proxy.$Proxy14.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:583)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:409)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:163)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:155)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:346)
	at com.sun.proxy.$Proxy15.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2472)
	... 9 more
19/01/25 00:20:52 INFO mapreduce.Job: Job job_local1430700062_0001 running in uber mode : false
19/01/25 00:20:52 INFO mapreduce.Job:  map 0% reduce 0%
19/01/25 00:20:52 INFO mapreduce.Job: Job job_local1430700062_0001 failed with state FAILED due to: NA
19/01/25 00:20:52 INFO mapreduce.Job: Counters: 0
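Besides opening up permissions with chmod 777, a common alternative is to tell the HDFS client which user to act as. When the cluster does not use Kerberos security, Hadoop honours the HADOOP_USER_NAME setting, supplied either as an environment variable or as a JVM system property. A minimal sketch (the user name ljj matches the directory owner shown in the exception above; substitute your own HDFS user) is:

//Sketch: act as the HDFS directory owner instead of loosening permissions
//(only effective on clusters without Kerberos security; "ljj" is taken from the inode owner in the exception)
System.setProperty("HADOOP_USER_NAME", "ljj");
Configuration conf = new Configuration();

Set the property at the top of main(), before the Configuration is created, or add HADOOP_USER_NAME to the environment tab of the Eclipse Run Configuration.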
