package hbase;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    // Override the map method: split each input line into tokens and emit (word, 1) for every token.
    public static class wordMap extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final Text word = new Text();
        private final static IntWritable one = new IntWritable(1);

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Debug output: the input line (value) and its byte offset in the file (key).
            System.out.println(value);
            System.out.println(key);
            StringTokenizer it = new StringTokenizer(value.toString());
            while (it.hasMoreTokens()) {
                word.set(it.nextToken());
                context.write(word, one);
            }
        }
    }

    // Override the reduce method (also used as the combiner): sum all counts emitted for the same word.
    public static class insumReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    // Driver: configure and submit the job.
    public static void main(String[] args) throws Exception {
        String input = "in";
        String output = "testout2";
        Configuration conf = new Configuration();
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(wordMap.class);
        job.setCombinerClass(insumReduce.class);
        job.setReducerClass(insumReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
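The driver above hardcodes its input and output paths ("in" and "testout2"). As a minimal sketch of a more flexible variant, assuming org.apache.hadoop.util.GenericOptionsParser from hadoop-common is on the classpath, the paths could come from the command line instead:

// Sketch: an alternative main() that reads paths from the command line.
// Requires: import org.apache.hadoop.util.GenericOptionsParser;
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Strip the generic Hadoop options (-D, -fs, ...) and keep the application arguments.
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
        System.err.println("Usage: WordCount <input path> <output path>");
        System.exit(2);
    }
    Job job = new Job(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(wordMap.class);
    job.setCombinerClass(insumReduce.class);
    job.setReducerClass(insumReduce.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

The job could then be submitted with something like "hadoop jar <your-jar> hbase.WordCount <input> <output>" (jar name hypothetical).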
To raise the JVM heap limits for Hadoop:
1. Edit the hadoop launch script (bin/hadoop):
JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx1024m
2. Edit the hadoop-env.sh file:
# The maximum amount of heap to use, in MB. Default is 1000.
export HADOOP_HEAPSIZE=2000
3. Set the JAVA_OPTS environment variable:
-Xms64m -Xmx1024m
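The same heap limits can also be applied per job from the driver rather than cluster-wide. A minimal sketch, assuming the classic MRv1 property name mapred.child.java.opts (Hadoop 2.x splits this into mapreduce.map.java.opts and mapreduce.reduce.java.opts):

// Sketch: per-job child JVM heap settings applied in the driver.
// Assumes the MRv1 key "mapred.child.java.opts"; on MRv2 use
// "mapreduce.map.java.opts" / "mapreduce.reduce.java.opts" instead.
Configuration conf = new Configuration();
conf.set("mapred.child.java.opts", "-Xms64m -Xmx1024m");
Job job = new Job(conf, "word count");
// ...configure mapper, reducer, and paths as in WordCount above...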