Approach: for each input line, generate every pairwise combination of the friends as a new key, take the first person on the line (the owner of the friend list) as the value, and have map emit <key, value>.
The reduce function then groups the values by key (the pairwise combinations built in the map phase); the collected list of "first person on each line" values for a given key is exactly the set of common friends of the two people in that key.
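For example, given the two hypothetical input lines

A B C D
B A C

the map phase emits <B-C, A>, <B-D, A>, <C-D, A> from the first line and <A-C, B> from the second. In the reduce phase the value list for key B-C is [A], so A is a common friend of B and C. (The "-" separator in the keys matches the one used in the code below.)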
Code:
package 优酷面试题;

import java.io.IOException;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class FindFriend {

    public static class ChangeMapper extends Mapper<Object, Text, Text, Text> {
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            Text owner = new Text();
            // TreeSet keeps the friends sorted, so each pair is always
            // emitted in the same order regardless of input order.
            Set<String> set = new TreeSet<String>();
            owner.set(itr.nextToken());   // first token: the owner of this friend list
            while (itr.hasMoreTokens()) {
                set.add(itr.nextToken());
            }
            String[] friends = new String[set.size()];
            friends = set.toArray(friends);
            // Emit every pairwise combination of friends as the key, with the
            // owner as the value. A separator keeps keys unambiguous
            // (e.g. "AB"+"C" vs. "A"+"BC" would otherwise collide).
            for (int i = 0; i < friends.length; i++) {
                for (int j = i + 1; j < friends.length; j++) {
                    String outputkey = friends[i] + "-" + friends[j];
                    context.write(new Text(outputkey), owner);
                }
            }
        }
    }

    /*
     * nextToken() is a method of the StringTokenizer class. StringTokenizer
     * splits a string on a delimiter you specify, such as ',' or whitespace;
     * nextToken() returns the next matching field.
     * -----------------------
     * A quick demo:
     *
     * import java.util.StringTokenizer;
     * public class Demo {
     *     public static void main(String args[]) {
     *         String str = "Hello, world";
     *         StringTokenizer st = new StringTokenizer(str, ","); // split on commas
     *         while (st.hasMoreTokens())                // any tokens left?
     *             System.out.println(st.nextToken());   // print the next token
     *     }
     * }
     * ----------------------
     * Output:
     * Hello
     * world
     */

    public static class FindReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Join all owners that produced this pair key; together they are
            // the common friends of the two people named in the key.
            StringBuilder commonfriends = new StringBuilder();
            for (Text val : values) {
                if (commonfriends.length() == 0) {
                    commonfriends.append(val.toString());
                } else {
                    commonfriends.append(":").append(val.toString());
                }
            }
            context.write(key, new Text(commonfriends.toString()));
        }
    }

    public static void main(String[] args)
            throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        /*
         * For example, with the command: bin/hadoop dfs -fs master:8020 -ls /data
         * GenericOptionsParser puts -fs master:8020 into conf, while
         * getRemainingArgs() returns the remaining arguments (-ls /data),
         * which are used below as the input and output paths.
         */
        if (otherArgs.length < 2) {
            System.err.println("args error");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "find common friends");
        job.setJarByClass(FindFriend.class);
        job.setMapperClass(ChangeMapper.class);
        // The reducer doubles as a combiner: concatenating owner lists is
        // associative, so merging partial lists early is safe here.
        job.setCombinerClass(FindReducer.class);
        job.setReducerClass(FindReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /*
     * GenericOptionsParser is the basic class in the Hadoop framework for
     * parsing command-line arguments. It recognizes the standard options,
     * letting an application easily specify the namenode, the jobtracker,
     * and other extra configuration resources.
     */
}
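Assuming the class is packaged into a jar (the jar name and HDFS paths below are illustrative, not from the original), the job is launched with the standard hadoop jar command, passing one or more input directories followed by the output directory:

bin/hadoop jar findfriend.jar 优酷面试题.FindFriend /user/input/friends /user/output/friends

Each input line is a space-separated friend list whose first token is the owner, e.g. "A B C D" means A's friends are B, C and D.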