Making the map() function read 5 lines at a time
Because only the logic used when reading the input needs to change, it is enough to implement our own InputFormat (with its own RecordReader); the Mapper and Reducer remain ordinary MapReduce code. A sketch of the InputFormat appears after the test class below.
The input data file is json.dat:
{
"name":"ta",
"age":12,
"sex":1
}
{
"name":"la",
"age":13,
"sex":2
}
...
Each JSON object spans exactly 5 lines, so grouping 5 lines per record hands the mapper one complete object at a time. The expected output collapses each object onto a single line:
{"name":"ta","age":12,"sex":1}
{"name":"la","age":13,"sex":2}
Test class:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

import java.io.IOException;

public class Test3 {

    public static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
        private final Text text = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // value already holds one complete JSON object: the custom
            // RecordReader has joined its 5 physical lines into a single line
            text.set(value.toString());
            context.write(text, new Text(""));
        }
    }
    public static class MyReducer extends Reducer<Text, Text, Text, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Emit each distinct JSON record once, discarding the empty marker values
            context.write(key, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        BasicConfigurator.configure(); // simple console logging for local runs
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "read five lines per map call");
        job.setJarByClass(Test3.class);
        job.setInputFormatClass(MyInputFormat.class); // the custom InputFormat, sketched below
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // NOTE: the paths below are placeholders; adjust them to your environment
        FileInputFormat.addInputPath(job, new Path("json.dat"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
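The driver above references MyInputFormat, which does the actual 5-line grouping. Below is a minimal sketch of one way to implement it, wrapping Hadoop's built-in LineRecordReader so that each call to nextKeyValue() consumes five physical lines, trims them, and concatenates them into one logical record (matching the single-line output shown earlier). The names MyInputFormat, MyRecordReader, and LINES_PER_RECORD are illustrative, not from the original code.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

import java.io.IOException;

public class MyInputFormat extends FileInputFormat<LongWritable, Text> {

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
        return new MyRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path file) {
        // Keep each file in a single split so a 5-line group never straddles a split boundary
        return false;
    }

    public static class MyRecordReader extends RecordReader<LongWritable, Text> {
        private static final int LINES_PER_RECORD = 5; // physical lines per logical record
        private final LineRecordReader lineReader = new LineRecordReader();
        private final LongWritable key = new LongWritable();
        private final Text value = new Text();

        @Override
        public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
            lineReader.initialize(split, context);
        }

        @Override
        public boolean nextKeyValue() throws IOException {
            StringBuilder record = new StringBuilder();
            int linesRead = 0;
            while (linesRead < LINES_PER_RECORD && lineReader.nextKeyValue()) {
                if (linesRead == 0) {
                    // Key of the logical record = byte offset of its first line
                    key.set(lineReader.getCurrentKey().get());
                }
                record.append(lineReader.getCurrentValue().toString().trim());
                linesRead++;
            }
            if (linesRead == 0) {
                return false; // end of input
            }
            value.set(record.toString());
            return true;
        }

        @Override
        public LongWritable getCurrentKey() { return key; }

        @Override
        public Text getCurrentValue() { return value; }

        @Override
        public float getProgress() throws IOException {
            return lineReader.getProgress();
        }

        @Override
        public void close() throws IOException {
            lineReader.close();
        }
    }
}

Returning false from isSplitable() trades parallelism for simplicity: each input file is processed by a single mapper, which guarantees that no 5-line record is cut in half at a split boundary. The class is plugged in with job.setInputFormatClass(MyInputFormat.class), as in the driver above.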