Creating a MapReduce Project in IntelliJ

1. Create a Maven project
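(If you prefer the command line to the IntelliJ wizard, the same skeleton can be generated with the standard quickstart archetype, e.g. `mvn archetype:generate -DgroupId=com.sogou -DartifactId=teemo-dc-etl -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false`; the groupId and artifactId here simply mirror the POM below.)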

2. The POM file


<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.sogou</groupId>
    <artifactId>teemo-dc-etl</artifactId>
    <version>1.0.0</version>
    <packaging>jar</packaging>

    <name>teemo-dc-etl</name>
    <url>http://maven.apache.org</url>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <!-- helper properties, not referenced elsewhere in this POM;
             the property names are inferred, the values are original -->
        <mahout.version>0.5</mahout.version>
        <mahout.groupid>org.apache.mahout</mahout.groupid>
        <spring.version>3.0.6.RELEASE</spring.version>
    </properties>

    <repositories>
        <repository>
            <id>maven-ali</id>
            <url>http://maven.twttr.com/</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>true</enabled>
                <updatePolicy>always</updatePolicy>
                <checksumPolicy>fail</checksumPolicy>
            </snapshots>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.5.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.5.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.5.0</version>
        </dependency>
        <dependency>
            <groupId>com.hadoop.gplcompression</groupId>
            <artifactId>hadoop-lzo</artifactId>
            <version>0.4.19</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-yarn-common</artifactId>
            <version>2.5.2</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.4</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.6</source>
                    <target>1.6</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.14.1</version>
                <configuration>
                    <argLine>-Xmx2048m</argLine>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>

Note the hadoop-lzo dependency: it is not available from Maven Central, which is why the Twitter repository (http://maven.twttr.com/, declared in the <repositories> section above) has to be added.
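With hadoop-lzo on the classpath, LZO-compressed input can be consumed by swapping the job's input format. Below is a minimal, untested sketch that reuses the mapper and reducer from the WordCount class in section 3; LzoTextInputFormat comes from the hadoop-lzo artifact, the input-lzo/output-lzo paths are placeholders, and the native LZO libraries are assumed to be installed wherever the job runs.

package com.sogou.teemo.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.hadoop.mapreduce.LzoTextInputFormat; // provided by the hadoop-lzo artifact

public class LzoWordCount {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "lzo word count");
        job.setJarByClass(LzoWordCount.class);
        // Reuse the mapper and reducer defined in WordCount (section 3 below).
        job.setMapperClass(WordCount.TokenizerMapper.class);
        job.setReducerClass(WordCount.IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Decompresses *.lzo files; if a matching .index file exists,
        // the files also become splittable across multiple mappers.
        job.setInputFormatClass(LzoTextInputFormat.class);
        FileInputFormat.addInputPath(job, new Path("input-lzo"));    // placeholder path
        FileOutputFormat.setOutputPath(job, new Path("output-lzo")); // placeholder path
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}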
3. Writing the MapReduce class
package com.sogou.teemo.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.StringTokenizer;

public class WordCount {
    /* Mapper: splits each input line into tokens and emits a (word, 1) pair per token. */
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    /* Reducer: sums the counts emitted for each word. */
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    /* Driver: configures and launches the MapReduce job. */
    public static void main(String[] args) throws Exception {
        // Only needed when running locally on Windows; points Hadoop at the winutils install.
        System.setProperty("hadoop.home.dir", "D:/hadoop-2.6.5");
        Configuration conf = new Configuration();
        /*if(args.length != 2){
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }*/
        String arg1 = "input";
        String arg2 = "output";
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(arg1));
        FileOutputFormat.setOutputPath(job, new Path(arg2));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
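Run as written from IntelliJ, the job reads text files from an `input` directory under the working directory and writes `output/part-r-00000`; for a file containing `hello world hello`, the result is `hello 2` and `world 1`. Delete `output` between runs, since FileOutputFormat refuses to write into an existing directory. To submit to a cluster instead, build the fat jar with `mvn package` (the assembly plugin produces something like target/teemo-dc-etl-1.0.0-jar-with-dependencies.jar, following Maven naming conventions) and launch it with `hadoop jar target/teemo-dc-etl-1.0.0-jar-with-dependencies.jar com.sogou.teemo.test.WordCount`.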

