Table of Contents
- 1. WordCount
  - 1.1 WordCountDriverNew
  - 1.2 WordCountMapper
  - 1.3 WordCountReducer
- 2. Sorting Student Records
  - 2.1 Student
  - 2.2 StudentDriver
  - 2.3 StudentMapper
  - 2.4 StudentReducer
- 3. Summing Student Scores
  - 3.1 ScoreDriver
  - 3.2 ScoreMapper
  - 3.3 ScoreReducer

The code below is commented throughout.
1. WordCount
1.1 WordCountDriverNew
package net.sherry.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.net.URI;

public class WordCountDriverNew {
    public static void main(String[] args) throws Exception {
        // Resolve DataNodes by hostname so the client also works from outside the cluster network
        Configuration conf = new Configuration();
        conf.set("dfs.client.use.datanode.hostname", "true");

        // Create the job and wire up the mapper/reducer classes and their key/value types
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCountDriverNew.class);
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Use three reducers, so the result is spread over three output files
        job.setNumReduceTasks(3);

        String uri = "hdfs://hadoop102:8020";
        String user = "sherry";
        Path inputPath = null;
        Path outputPath = null;
        // With no arguments use the default paths; with two arguments treat them as HDFS input/output paths
        if (args.length == 0) {
            inputPath = new Path(uri + "/wordcount/input");
            outputPath = new Path(uri + "/wordcount/output");
        } else if (args.length == 2) {
            inputPath = new Path(uri + args[0]);
            outputPath = new Path(uri + args[1]);
        } else {
            System.out.println("The number of arguments must be either 0 or 2");
            return;
        }

        // Delete any existing output directory, otherwise the job would fail on startup
        FileSystem fs = FileSystem.get(new URI(uri), conf, user);
        fs.delete(outputPath, true);

        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);
        job.waitForCompletion(true);

        // Print every result file (index 0 is the _SUCCESS marker, so start at 1)
        System.out.println("====== Word Count Result ======");
        FileStatus[] fileStatuses = fs.listStatus(outputPath);
        for (int i = 1; i < fileStatuses.length; i++) {
            System.out.println(fileStatuses[i].getPath());
            FSDataInputStream in = fs.open(fileStatuses[i].getPath());
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}
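Since WordCountReducer only sums IntWritable values, it could also be registered as a combiner so partial counts are merged on the map side before the shuffle. This line is not part of the original driver; it is an optional addition that would sit next to the other job.set* calls:

        job.setCombinerClass(WordCountReducer.class);  // optional: pre-aggregate <word, 1> pairs per map task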
1.2 WordCountMapper
package net.sherry.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // One input line per call
        String line = value.toString();
        System.out.println("Source line: " + line);
        // Strip all punctuation (the \pP Unicode category) before splitting into words
        line = line.replaceAll("[\\pP]", "");
        System.out.println("After punctuation removal: " + line);
        // Emit <word, 1> for every word on the line
        String[] words = line.split(" ");
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
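The [\pP] pattern matches every Unicode punctuation character. A tiny stand-alone sketch (with a hypothetical sample string, not from the job's input) of what the mapper's replaceAll does:

public class PunctDemo {  // illustrative only, not part of the MapReduce job
    public static void main(String[] args) {
        String s = "Hello, world! Hello.";
        // Same call the mapper makes: strips commas, periods, exclamation marks, etc.
        System.out.println(s.replaceAll("[\\pP]", ""));  // prints: Hello world Hello
    }
}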
1.3 WordCountReducer
package net.sherry.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the 1s emitted by the mapper for this word
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
2. Sorting Student Records
2.1 Student
package net.sherry.student;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Custom key type. Implementing WritableComparable lets Hadoop both serialize
 * the object and use it as a sortable map output key.
 */
public class Student implements WritableComparable<Student> {
    private String name;
    private String gender;
    private int age;
    private String phone;
    private String major;

    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public String getGender() {
        return gender;
    }
    public void setGender(String gender) {
        this.gender = gender;
    }
    public int getAge() {
        return age;
    }
    public void setAge(int age) {
        this.age = age;
    }
    public String getPhone() {
        return phone;
    }
    public void setPhone(String phone) {
        this.phone = phone;
    }
    public String getMajor() {
        return major;
    }
    public void setMajor(String major) {
        this.major = major;
    }

    @Override
    public String toString() {
        return "Student{" +
                "name='" + name + '\'' +
                ", gender='" + gender + '\'' +
                ", age=" + age +
                ", phone='" + phone + '\'' +
                ", major='" + major + '\'' +
                '}';
    }

    // Sort order: gender descending; within the same gender, age descending
    @Override
    public int compareTo(Student o) {
        if (this.getGender().compareTo(o.getGender()) == 0) {
            return o.getAge() - this.getAge();
        } else {
            return o.getGender().compareTo(this.getGender());
        }
    }

    @Override
    public void write(DataOutput out) throws IOException {
        // Serialization order must match readFields()
        out.writeUTF(name);
        out.writeUTF(gender);
        out.writeInt(age);
        out.writeUTF(phone);
        out.writeUTF(major);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        name = in.readUTF();
        gender = in.readUTF();
        age = in.readInt();
        phone = in.readUTF();
        major = in.readUTF();
    }
}
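A quick local check of the ordering that compareTo produces, run outside Hadoop with hypothetical field values (the real student.txt is not shown in this post): genders are grouped in descending string order, and within a gender ages run from oldest to youngest.

package net.sherry.student;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class StudentSortDemo {  // illustrative only, not part of the MapReduce job
    static Student make(String name, String gender, int age) {
        Student s = new Student();
        s.setName(name);
        s.setGender(gender);
        s.setAge(age);
        s.setPhone("-");
        s.setMajor("-");
        return s;
    }

    public static void main(String[] args) {
        List<Student> list = new ArrayList<>();
        list.add(make("A", "male", 19));
        list.add(make("B", "female", 22));
        list.add(make("C", "male", 21));
        Collections.sort(list);                  // uses Student.compareTo
        list.forEach(System.out::println);       // C (male, 21), A (male, 19), B (female, 22)
    }
}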
2.2 StudentDriver
package net.sherry.student;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.net.URI;

public class StudentDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("dfs.client.use.datanode.hostname", "true");

        Job job = Job.getInstance(conf);
        job.setJarByClass(StudentDriver.class);
        job.setMapperClass(StudentMapper.class);
        // The Student object itself is the map output key, so its compareTo() drives the shuffle sort
        job.setMapOutputKeyClass(Student.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setReducerClass(StudentReducer.class);
        // The reducer emits one tab-separated Text line per student, with no value
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        String uri = "hdfs://hadoop102:8020";
        String user = "sherry";
        Path inputPath = new Path(uri + "/wordcount/input/student.txt");
        Path outputPath = new Path(uri + "/wordcount/output/student");

        // Delete any previous output so the job can be rerun
        FileSystem fs = FileSystem.get(new URI(uri), conf, user);
        fs.delete(outputPath, true);

        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);
        job.waitForCompletion(true);

        // Print the result files (skip index 0, the _SUCCESS marker)
        System.out.println("====== Sorted Students ======");
        FileStatus[] fileStatuses = fs.listStatus(outputPath);
        for (int i = 1; i < fileStatuses.length; i++) {
            System.out.println(fileStatuses[i].getPath());
            FSDataInputStream in = fs.open(fileStatuses[i].getPath());
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}
2.3 StudentMapper
package net.sherry.student;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class StudentMapper extends Mapper<LongWritable, Text, Student, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Each input line holds one record: name gender age phone major, separated by spaces
        String line = value.toString();
        System.out.println(line);
        String[] fields = line.split(" ");
        String name = fields[0];
        String gender = fields[1];
        int age = Integer.parseInt(fields[2]);
        String phone = fields[3];
        String major = fields[4];

        // Pack the fields into a Student and emit it as the key; the value carries nothing
        Student student = new Student();
        student.setName(name);
        student.setGender(gender);
        student.setAge(age);
        student.setPhone(phone);
        student.setMajor(major);
        context.write(student, NullWritable.get());
    }
}
2.4 StudentReducer
package net.sherry.student;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class StudentReducer extends Reducer<Student, NullWritable, Text, NullWritable> {
    @Override
    protected void reduce(Student key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // Records that compare as equal are grouped into one call; Hadoop reuses the key object
        // and refills it as the value iterator advances, so the key is read inside the loop
        for (NullWritable value : values) {
            Student student = key;
            String studentInfo = student.getName() + "\t"
                    + student.getGender() + "\t"
                    + student.getAge() + "\t"
                    + student.getPhone() + "\t"
                    + student.getMajor();
            context.write(new Text(studentInfo), NullWritable.get());
        }
    }
}
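Because grouping defaults to the key's compareTo, two records with the same gender and age land in a single reduce call even when the other fields differ; that is why the reducer writes a line per value instead of once per call. A tiny check with hypothetical values:

package net.sherry.student;

public class GroupingCheck {  // illustrative only
    public static void main(String[] args) {
        Student a = new Student();
        a.setName("A"); a.setGender("male"); a.setAge(20);
        Student b = new Student();
        b.setName("B"); b.setGender("male"); b.setAge(20);
        // 0 means the default grouping comparator would place both records in one reduce call
        System.out.println(a.compareTo(b));  // 0
    }
}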
3. Summing Student Scores
3.1 ScoreDriver
package net.sherry.sum;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.net.URI;

public class ScoreDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("dfs.client.use.datanode.hostname", "true");

        Job job = Job.getInstance(conf);
        job.setJarByClass(ScoreDriver.class);
        job.setMapperClass(ScoreMapper.class);
        // Map output: <student name, single-subject score>
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(ScoreReducer.class);
        // Reduce output: one formatted Text line per student, no value
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        String uri = "hdfs://hadoop102:8020";
        String user = "sherry";
        Path inputPath = new Path(uri + "/wordcount/input/score.txt");
        Path outputPath = new Path(uri + "/wordcount/output/score");

        // Remove any previous output so the job can be rerun
        FileSystem fs = FileSystem.get(new URI(uri), conf, user);
        fs.delete(outputPath, true);

        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);
        job.waitForCompletion(true);

        // Print the result files (skip index 0, the _SUCCESS marker)
        System.out.println("====== Score Summary ======");
        FileStatus[] fileStatuses = fs.listStatus(outputPath);
        for (int i = 1; i < fileStatuses.length; i++) {
            System.out.println(fileStatuses[i].getPath());
            FSDataInputStream in = fs.open(fileStatuses[i].getPath());
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}
3.2 ScoreMapper
package net.sherry.sum;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class ScoreMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Each line: a student name followed by that student's scores, separated by spaces
        String line = value.toString();
        String[] fields = line.split(" ");
        String name = fields[0].trim();
        // Emit <name, score> once per score column
        for (int i = 1; i < fields.length; i++) {
            System.out.println(fields.length);  // debug: column count of this line
            int score = Integer.parseInt(fields[i]);
            context.write(new Text(name), new IntWritable(score));
        }
    }
}
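For a hypothetical input line such as "Alice 85 90 78" (the real score.txt is not shown in this post), the mapper emits three pairs: <Alice, 85>, <Alice, 90>, <Alice, 78>. A quick stand-alone sketch of that split:

package net.sherry.sum;

public class ScoreLineDemo {  // illustrative only, assumes space-separated columns
    public static void main(String[] args) {
        String line = "Alice 85 90 78";  // hypothetical record
        String[] fields = line.split(" ");
        String name = fields[0].trim();
        for (int i = 1; i < fields.length; i++) {
            System.out.println(name + " -> " + Integer.parseInt(fields[i]));
        }
    }
}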
3.3 ScoreReducer
package net.sherry.sum;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.text.DecimalFormat;

public class ScoreReducer extends Reducer<Text, IntWritable, Text, NullWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum this student's scores and count how many subjects there were
        int count = 0;
        int sum = 0;
        double avg = 0;
        for (IntWritable value : values) {
            count++;
            sum += value.get();
        }
        // Average kept to at most one decimal place
        avg = sum * 1.0 / count;
        DecimalFormat df = new DecimalFormat("#.#");
        String scoreInfo = key + " " + sum + " " + df.format(avg);
        context.write(new Text(scoreInfo), NullWritable.get());
    }
}
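The "#.#" pattern keeps at most one decimal digit and drops trailing zeros. A small check of the formatting used above, with made-up numbers:

import java.text.DecimalFormat;

public class AvgFormatDemo {  // illustrative only
    public static void main(String[] args) {
        DecimalFormat df = new DecimalFormat("#.#");
        System.out.println(df.format(253 / 3.0));  // 84.3
        System.out.println(df.format(84.0));       // 84 (trailing .0 is dropped)
    }
}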