Steps:
1) A distributed computation program usually needs to be split into at least two stages.
2) The concurrent MapTask instances of the first stage run fully in parallel and are independent of one another.
3) The concurrent ReduceTask instances of the second stage are likewise independent of one another, but their input depends on the output of all MapTask instances from the previous stage.
4) The MapReduce programming model can contain only one Map stage and one Reduce stage; if the business logic is more complex than that, the only option is to chain several MapReduce programs and run them serially (see the sketch after this list).
Summary: tracing the WordCount data flow is a good way to internalize the core idea of MapReduce.
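When one Map/Reduce pass is not enough, the usual pattern is to run several jobs back to back, feeding each job the output directory of the previous one. Below is a minimal structural sketch of that chaining; ChainDriver and the Step1/Step2 mapper and reducer classes are hypothetical placeholders, not part of the examples in this post:

// Sketch: running two MapReduce jobs serially; job 2 reads job 1's output directory.
package com.lj.chain;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class ChainDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Job 1: first Map/Reduce stage
        Job job1 = Job.getInstance(conf, "step1");
        job1.setJarByClass(ChainDriver.class);
        job1.setMapperClass(Step1Mapper.class);     // hypothetical mapper
        job1.setReducerClass(Step1Reducer.class);   // hypothetical reducer
        FileInputFormat.setInputPaths(job1, new Path(args[0]));
        FileOutputFormat.setOutputPath(job1, new Path(args[1]));
        // Block until job 1 finishes; abort the chain if it fails
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }
        // Job 2: second Map/Reduce stage, reading job 1's output directory as its input
        Job job2 = Job.getInstance(conf, "step2");
        job2.setJarByClass(ChainDriver.class);
        job2.setMapperClass(Step2Mapper.class);     // hypothetical mapper
        job2.setReducerClass(Step2Reducer.class);   // hypothetical reducer
        FileInputFormat.setInputPaths(job2, new Path(args[1]));
        FileOutputFormat.setOutputPath(job2, new Path(args[2]));
        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}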
Maven pom.xml (project coordinates and dependencies):
<groupId>com.lj</groupId>
<artifactId>HadoopLearn</artifactId>
<version>1.0-SNAPSHOT</version>
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>RELEASE</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.6.0</version>
    </dependency>
</dependencies>
package com.lj.wordcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
// Map phase
// KEYIN:    type of the input key (byte offset of the line)
// VALUEIN:  type of the input value (one line of text)
// KEYOUT:   type of the output key, e.g. atguigu,1  ss,1
// VALUEOUT: type of the output value
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    Text k = new Text();
    IntWritable v = new IntWritable(1);
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1 Get one line
        String line = value.toString();
        // 2 Split it into words
        String[] words = line.split(" ");
        // 3 Write out (word, 1) for every word
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
package com.lj.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
// KEYIN, VALUEIN: the key and value types output by the map phase
public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable v = new IntWritable();
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
            Context context) throws IOException, InterruptedException {
        int sum = 0;
        // 1 Sum up the counts for this key
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);
        // 2 Write out, e.g. atguigu 2
        context.write(key, v);
    }
}
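To make the data flow concrete: for the line "atguigu atguigu ss" the mapper emits (atguigu,1), (atguigu,1), (ss,1); the shuffle groups the values by key, and the reducer sums them into (atguigu,2), (ss,1). A small plain-Java sketch of that flow, purely for illustration (no Hadoop involved; the sample lines are made up):

// Plain-Java illustration of the WordCount data flow: map -> group by key -> reduce (sum).
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
public class WordCountFlowDemo {
    public static void main(String[] args) {
        String[] lines = { "atguigu atguigu ss", "ss cls jiao" };
        // "Map" phase: emit one entry per word
        List<String> mapOutput = new ArrayList<>();
        for (String line : lines) {
            for (String word : line.split(" ")) {
                mapOutput.add(word);
            }
        }
        // "Shuffle + Reduce": group identical words and sum their counts
        Map<String, Integer> counts = new TreeMap<>();
        for (String word : mapOutput) {
            counts.merge(word, 1, Integer::sum);
        }
        counts.forEach((word, count) -> System.out.println(word + "\t" + count));
    }
}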
package com.lj.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
// import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; on a cluster these come from the command line
        args = new String[] {
                "F:/HadoopTest/MapReduce/input/learn.txt", "F:/HadoopTest/MapReduce/output" };
        Configuration conf = new Configuration();
        // 1 Get the Job object
        Job job = Job.getInstance(conf);
        // 2 Set the jar location
        job.setJarByClass(WordCountDriver.class);
        // 3 Wire up the Mapper and Reducer classes
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);
        // 4 Set the key and value types of the Mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5 Set the key and value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7 Submit the job
        // job.submit();
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
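One practical note: FileOutputFormat refuses to run if the output directory already exists, which is easy to hit when re-running locally. A small optional addition to the driver, using the standard Hadoop FileSystem API, that removes the old output directory first:

// Optional: delete the output directory if it exists, so the job can be re-run.
// Place this before FileOutputFormat.setOutputPath(...);
// requires: import org.apache.hadoop.fs.FileSystem;
FileSystem fs = FileSystem.get(conf);
Path outPath = new Path(args[1]);
if (fs.exists(outPath)) {
    fs.delete(outPath, true);   // true = delete recursively
}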
log4j.properties:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
pom.xml build section (compiler level and packaging a jar with dependencies):
<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>2.3.2</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
        <plugin>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
                <archive>
                    <manifest>
                        <!-- main class of the packaged jar -->
                        <mainClass>com.lj.wordcount.WordCountDriver</mainClass>
                    </manifest>
                </archive>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
Excerpt: the Writable serialization methods plus a compareTo for descending order by total flow (relevant when the bean is used as a sort key):
public FlowBean() {
    super();
}
@Override
public void write(DataOutput out) throws IOException {
    out.writeLong(upFlow);
    out.writeLong(downFlow);
    out.writeLong(sumFlow);
}
@Override
public void readFields(DataInput in) throws IOException {
    upFlow = in.readLong();
    downFlow = in.readLong();
    sumFlow = in.readLong();
}
@Override
public int compareTo(FlowBean o) {
    // Descending order (largest total flow first); returns 0 for equal values
    return Long.compare(o.getSumFlow(), this.sumFlow);
}
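For this compareTo to influence the shuffle sort, the bean has to be used as the map output key and implement WritableComparable rather than plain Writable. Below is a minimal sketch under that assumption; the class name SortableFlowBean is hypothetical, chosen here only to avoid clashing with the FlowBean shown next, which implements only Writable:

// Sketch: a bean declared as WritableComparable so it can serve as a sort key.
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class SortableFlowBean implements WritableComparable<SortableFlowBean> {
    private long upFlow;
    private long downFlow;
    private long sumFlow;
    public SortableFlowBean() { }                     // empty constructor for reflection
    public long getSumFlow() { return sumFlow; }
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();                       // same order as write()
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }
    @Override
    public int compareTo(SortableFlowBean o) {
        return Long.compare(o.sumFlow, this.sumFlow); // descending by total flow
    }
}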
Serialization example code:
package com.lj.flowsum;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowBean implements Writable {
    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic
    // Empty constructor, required so the framework can instantiate the bean via reflection
    public FlowBean() {
        super();
    }
    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        sumFlow = upFlow + downFlow;
    }
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(sumFlow);
    }
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        // Must read the fields in exactly the same order they were written
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
        sumFlow = dataInput.readLong();
    }
    public long getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }
    public long getDownFlow() {
        return downFlow;
    }
    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }
    public long getSumFlow() {
        return sumFlow;
    }
    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }
    public void set(long upFlow2, long downFlow2) {
        upFlow = upFlow2;
        downFlow = downFlow2;
        sumFlow = upFlow2 + downFlow2;
    }
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
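A quick way to convince yourself that write() and readFields() stay in sync is a local round trip through the plain java.io data streams. A small sketch, assuming it lives in the same com.lj.flowsum package as FlowBean (the class name FlowBeanRoundTrip is only illustrative):

// Round-trip check: serialize a FlowBean to bytes, read it back, and print it.
package com.lj.flowsum;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(1116, 954);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));               // serialize
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray())));   // deserialize in the same order
        System.out.println(copy);                                  // expected: 1116  954  2070
    }
}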
package com.lj.flowsum;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private Text k = new Text();
    private FlowBean v = new FlowBean();
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Sample record: 7 13560436666 120.196.100.99 1116 954 200
        // 1 Get one line
        String line = value.toString();
        // 2 Split on tabs
        String[] fields = line.split("\t");
        // 3 Populate the output key and value
        k.set(fields[1]); // phone number
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        v.setUpFlow(upFlow);
        v.setDownFlow(downFlow);
        // v.set(upFlow, downFlow);
        // 4 Write out
        context.write(k, v);
    }
}
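The index arithmetic in this mapper is easiest to verify against the sample record from the comment: after splitting on tabs, fields[1] is the phone number, and the upstream/downstream traffic sit third- and second-from-last, so trailing columns such as the status code do not shift them. A tiny illustrative check (the class name FieldIndexDemo is only for this sketch):

// Verifying which columns FlowCountMapper reads, using the sample record above.
public class FieldIndexDemo {
    public static void main(String[] args) {
        String line = "7\t13560436666\t120.196.100.99\t1116\t954\t200";
        String[] fields = line.split("\t");
        System.out.println("phone    = " + fields[1]);                 // 13560436666
        System.out.println("upFlow   = " + fields[fields.length - 3]); // 1116
        System.out.println("downFlow = " + fields[fields.length - 2]); // 954
    }
}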
package com.lj.flowsum;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    FlowBean v = new FlowBean();
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        // e.g. 13568436656 2481 24681 30000
        //      13568436656 1116 954   20000
        long sum_upFlow = 0;
        long sum_downFlow = 0;
        // 1 Sum the upstream and downstream traffic
        for (FlowBean flowBean : values) {
            sum_upFlow += flowBean.getUpFlow();
            sum_downFlow += flowBean.getDownFlow();
        }
        v.set(sum_upFlow, sum_downFlow);
        // 2 Write out
        context.write(key, v);
    }
}
package com.lj.flowsum;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
public class ProvincePartitioner extends Partitioner<Text, FlowBean> {
    @Override
    public int getPartition(Text key, FlowBean value, int numPartitions) {
        // key   = phone number
        // value = traffic info
        // Take the first three digits of the phone number
        String prePhoneNum = key.toString().substring(0, 3);
        int partition = 4; // everything else goes to partition 4
        if ("136".equals(prePhoneNum)) {
            partition = 0;
        } else if ("137".equals(prePhoneNum)) {
            partition = 1;
        } else if ("138".equals(prePhoneNum)) {
            partition = 2;
        } else if ("139".equals(prePhoneNum)) {
            partition = 3;
        }
        return partition;
    }
}
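ProvincePartitioner maps keys into five partitions (0 through 4), so when it is enabled the job should run with at least five reduce tasks: with more than one but fewer than five reducers, partition numbers without a reducer trigger an "Illegal partition" error, while extra reducers only produce empty part files. A sketch of the lines that turn it on in the driver shown below:

// In FlowsumDriver, after step 5 and before setting the input/output paths:
job.setPartitionerClass(ProvincePartitioner.class);
job.setNumReduceTasks(5);   // must cover partitions 0-4 returned by getPartition()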
package com.lj.flowsum;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowsumDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; on a cluster these come from the command line
        args = new String[]{
                "F:/HadoopTest/MapReduce/input/phone_data.txt", "F:/HadoopTest/MapReduce/output/"};
        Configuration conf = new Configuration();
        // 1 Get the job object
        Job job = Job.getInstance(conf);
        // 2 Set the jar location
        job.setJarByClass(FlowsumDriver.class);
        // 3 Wire up the mapper and reducer
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);
        // 4 Set the key and value types of the mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // 5 Set the key and value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // Uncomment to partition the output by phone-number prefix
        // job.setPartitionerClass(ProvincePartitioner.class);
        // job.setNumReduceTasks(6);
        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7 Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
A review of earlier material to reinforce the fundamentals!
Learning source: 尚硅谷 (Atguigu) big data tutorial videos
Make a little progress every day; maybe one day you will look back and see just how small you once were!!!