Solving the topN problem in MapReduce

The topN problem:
The topN problem is to output the largest one (or largest few) values in each group. Why "one or a few"? Because the algorithm is the same whether you output one value or several. Below we demonstrate with the single-value case; a top-two variant is sketched after the full code listing.

The six orders are:

o1,p2,250.0
o2,p3,500.0
o2,p4,100.0
o2,p5,700.0
o3,p1,150.0
o1,p1,200.0

Find the highest amount in each order; the expected output is:

o1    250.0
o2    700.0
o3    150.0
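
Before the reduce phase runs, the map output pairs (orderId, amount) are shuffled and grouped by key, so each reduce() call receives all amounts of one order together. For the sample data above, the grouped intermediate data looks roughly like this (shown only for illustration):

o1 -> [250.0, 200.0]
o2 -> [500.0, 100.0, 700.0]
o3 -> [150.0]

The reducer therefore only needs one pass over each group to find the maximum, which is exactly what the code below does.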


package com;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TopNTestMark_to_win {

    /* Mapper: parses each CSV order line and emits (orderId, amount). */
    public static class OrderMapper extends Mapper<Object, Text, Text, DoubleWritable> {
/*
o1,p2,250.0
o2,p3,500.0
o2,p4,100.0
o2,p5,700.0
o3,p1,150.0
o1,p1,200.0
*/
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            System.out.println("key is " + key.toString() + " value is " + value.toString());
            String line = value.toString();
            String[] fields = line.split(",");
            String orderId = fields[0];
            double amount = Double.parseDouble(fields[2]);
            DoubleWritable amountDouble = new DoubleWritable(amount);
            context.write(new Text(orderId), amountDouble);
        }

    }

    /* Reducer: receives all amounts for one orderId and keeps the maximum. */
    public static class MaxReducer extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {

        DoubleWritable resultDouble = new DoubleWritable(0.0);
        public void reduce(Text key, Iterable<DoubleWritable> values, Context context)
                throws IOException, InterruptedException {
            System.out.println("reduce key is " + key.toString());
            // start below any possible amount; Double.MIN_VALUE would be wrong here,
            // since it is the smallest positive double, not the most negative value
            double max = Double.NEGATIVE_INFINITY;
            for (DoubleWritable v2 : values) {
                if (v2.get() > max) {
                    max = v2.get();
                }
            }
            resultDouble.set(max);
            context.write(key, resultDouble);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "top n");
        job.setJarByClass(TopNTestMark_to_win.class);
        job.setMapperClass(OrderMapper.class);
        // job.setCombinerClass(MaxReducer.class); // optional: taking a per-group max is safe to run as a combiner
        job.setReducerClass(MaxReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        // a previous run's output directory must be removed, otherwise the job fails on an existing output path
        File file = new File("e:/temp/output");
        if (file.exists() && file.isDirectory()) {
            deleteFile(file);
        }

        FileInputFormat.setInputPaths(job, new Path("e:/temp/input/serial.txt"));
        FileOutputFormat.setOutputPath(job, new Path("e:/temp/output"));

        boolean success = job.waitForCompletion(true);
        System.out.println("mytest hadoop " + (success ? "successful" : "failed"));
        System.exit(success ? 0 : 1);
    }

    public static boolean deleteFile(File dirFile) {
        if (!dirFile.exists()) {
            return false;
        }
        if (dirFile.isFile()) {
            return dirFile.delete();
        } else { /* an empty directory skips the for loop and falls straight through to dirFile.delete() below */
            for (File file : dirFile.listFiles()) {
                deleteFile(file);
            }
        }
        return dirFile.delete();
    }

}

Output:
o1    250.0
o2    700.0
o3    150.0
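
As noted above, outputting the top two amounts per order follows the same pattern; only the reducer changes. Below is a minimal sketch of such a reducer that could be dropped into the same file (the class name Top2Reducer and the use of a PriorityQueue are illustrative choices, not part of the original code):

    public static class Top2Reducer extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {
        public void reduce(Text key, Iterable<DoubleWritable> values, Context context)
                throws IOException, InterruptedException {
            // min-heap holding at most the two largest amounts seen so far
            java.util.PriorityQueue<Double> top = new java.util.PriorityQueue<Double>();
            for (DoubleWritable v : values) {
                top.offer(v.get());
                if (top.size() > 2) {
                    top.poll(); // evict the smallest, keeping only the top two
                }
            }
            // drain the heap and emit the kept amounts in descending order
            java.util.List<Double> kept = new java.util.ArrayList<Double>(top);
            java.util.Collections.sort(kept, java.util.Collections.reverseOrder());
            for (Double d : kept) {
                context.write(key, new DoubleWritable(d));
            }
        }
    }

Plugging this class into job.setReducerClass() in place of MaxReducer would, for the sample data, emit both 700.0 and 500.0 for order o2.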