报错
原因分析
package 美国疫情.州分区;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
 * MapReduce driver: reads US COVID-19 records from the MySQL table {@code usa}
 * via {@link DBInputFormat}, partitions them by state through
 * {@code StatePartition} across 7 reduce tasks, and writes text output to a
 * local directory.
 *
 * <p>Fix: on any setup/submission failure the program previously printed the
 * stack trace and fell off {@code main}, exiting with status 0 (success); it
 * now exits with status 1 so callers/scripts can detect the failure.
 */
public class Driver3 {
    public static void main(String[] args) {
        try {
            // Base Hadoop configuration.
            Configuration conf = new Configuration();
            // Configure the JDBC source.
            // NOTE(review): credentials are hard-coded; move them to external
            // configuration/secrets for anything beyond a classroom demo.
            DBConfiguration.configureDB(
                    conf,
                    "com.mysql.jdbc.Driver",
                    "jdbc:mysql://192.168.64.178:3306/school",
                    "root", "123456"
            );
            Job job = Job.getInstance(conf);
            // Wire up mapper, reducer and driver classes.
            job.setMapperClass(Map3.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(BeanSQL.class);
            job.setReducerClass(Reduce3.class);
            job.setOutputKeyClass(BeanSQL.class);
            job.setOutputValueClass(NullWritable.class);
            job.setJarByClass(Driver3.class);
            // Custom partitioner: route records by state to 7 reduce tasks.
            job.setPartitionerClass(StatePartition.class);
            job.setNumReduceTasks(7);
            // Input comes from MySQL; output must be a CONCRETE OutputFormat
            // (TextOutputFormat), not the abstract FileOutputFormat.
            job.setInputFormatClass(DBInputFormat.class);
            job.setOutputFormatClass(TextOutputFormat.class);
            // Columns selected from the "usa" table, in bean field order.
            String[] fields = {
                    "date", "country", "state", "fips", "cases", "deaths"};
            DBInputFormat.setInput(job, BeanSQL.class, "usa", null, "", fields);
            FileOutputFormat.setOutputPath(job, new Path("G:/Cache/MR/output_usa"));
            // Submit and block until completion; exit status mirrors success.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            // Report the failure and exit non-zero instead of silently
            // returning from main with status 0.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
一开始编写程序时,把 job.setOutputFormatClass(TextOutputFormat.class);
误写成了 job.setOutputFormatClass(FileOutputFormat.class);
——这里定义的输出格式类写错了!FileOutputFormat 是抽象类,不能被实例化为实际的输出格式,因此运行时报错;应使用它的具体子类(如 TextOutputFormat)。
常见的输出类型参见:【MapReduce】---- MR 框架原理 之 OutputFormat 数据输出