MapperClass.java file
package Kmeans;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.LineReader;
//A data point: a name plus x/y coordinates
class DmRecord {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
private double xpodouble; //x coordinate
private double ypodouble; //y coordinate
//Constructors
public DmRecord()
{
}
public DmRecord(String name,double x,double y)
{
this.name = name;
this.xpodouble = x;
this.ypodouble = y;
}
public double getXpoint()
{
return xpodouble;
}
public void setXpoint(double xpodouble)
{
this.xpodouble = xpodouble;
}
public double getYpoint()
{
return ypodouble;
}
public void setYpoint(double ypodouble) {
this.ypodouble = ypodouble;
}
//Compute the Euclidean distance between this point and another
public double distance(DmRecord record)
{
return Math.sqrt(Math.pow(this.xpodouble-record.xpodouble, 2)+Math.pow(this.ypodouble-record.ypodouble, 2));
}
}
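//A minimal standalone sketch (not part of the original job) showing what distance()
//computes; the class name and the sample coordinates below are made up for the
//illustration and can be deleted without affecting the job.
class DistanceDemo {
    public static void main(String[] args) {
        DmRecord a = new DmRecord("A", 1.0, 2.0);
        DmRecord b = new DmRecord("B", 4.0, 6.0);
        // sqrt((1-4)^2 + (2-6)^2) = sqrt(9 + 16) = 5.0
        System.out.println(a.distance(b)); // prints 5.0
    }
}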
//Helper class: loads the cluster centers and checks convergence
class DmRecordParser
{
private Map<String, DmRecord> urlMap = new HashMap<String, DmRecord>();
//Read the cluster-center file and initialize the centers
public void initialize(String file) throws IOException
{
//Create a Configuration
Configuration config = new Configuration();
//Open an FSDataInputStream for the center file
FSDataInputStream indic = null;
FileSystem fs = FileSystem.get(URI.create(file),config);
indic = fs.open(new Path(file));
String t = indic.readLine();
while (t != null)
{
//Note on split(" "): consecutive spaces produce empty tokens, so make sure fields are
//separated by exactly one space. Hadoop writes a tab between key and value, so the tab
//is first replaced with a space.
String [] strKey = t.replace("\t", " ").split(" ");
urlMap.put(strKey[0],parse(t));
t = indic.readLine();
}
}
//Read the center coordinates from a center file (the old or the new one)
public static List<ArrayList<Double>> getCenters(String inputpath){
List<ArrayList<Double>> result = new ArrayList<ArrayList<Double>>();
Configuration conf = new Configuration();
try {
FileSystem hdfs = FileSystem.get(conf);
Path in = new Path(inputpath);
FSDataInputStream fsIn = hdfs.open(in);
LineReader lineIn = new LineReader(fsIn, conf);
Text line = new Text();
while (lineIn.readLine(line) > 0){
String record = line.toString();
/*
Hadoop writes a tab between key and value, so replace it with a space
before splitting.
*/
String[] fields = record.replace("\t", " ").split(" ");
List<Double> tmplist = new ArrayList<Double>();
//The first field is the center id (e.g. K1), so it is not stored
for (int i = 1; i < fields.length; ++i){
tmplist.add(Double.parseDouble(fields[i]));
}
result.add((ArrayList<Double>) tmplist);
}
fsIn.close();
} catch (IOException e){
e.printStackTrace();
}
return result;
}
//Compute the distance between the cluster centers of two consecutive iterations to decide whether to stop; the accumulated value is left un-square-rooted
public static boolean isFinished(String oldpath, String newpath,double threshold)
throws IOException{
List<ArrayList<Double>> oldcenters = getCenters(oldpath);
List<ArrayList<Double>> newcenters = getCenters(newpath);
double distance = 0;
for (int i = 0; i < 2; i++){
for (int j = 0; j < oldcenters.get(i).size(); j++){
double tmp = Math.abs(oldcenters.get(i).get(j) - newcenters.get(i).get(j));
distance += Math.pow(tmp, 2);
}
}
/*
If the stopping condition is not met, the centers from this iteration replace the old ones.
Since conf.txt must hold the final centers once the loop ends, the centers are updated on
every iteration regardless.
*/
Configuration conf = new Configuration();
FileSystem hdfs = FileSystem.get(conf);
//Download the new center file from HDFS to the local file /usr/conf.txt
hdfs.copyToLocalFile(new Path(newpath), new Path("/usr/conf.txt"));
//Delete the old and new center files on HDFS
hdfs.delete(new Path(oldpath), true);
hdfs.delete(new Path(newpath), true);
//Upload the local /usr/conf.txt back to HDFS as conf.txt
hdfs.moveFromLocalFile(new Path("/usr/conf.txt"), new Path(oldpath));
return distance < threshold;
}
/**
* Build a coordinate record (DmRecord) from one line of text
*/
public DmRecord parse(String line){
String [] strPlate = line.replace("\t", " ").split(" ");
DmRecord Dmurl = new DmRecord(strPlate[0],Double.parseDouble(strPlate[1]),Double.parseDouble(strPlate[2]));
return Dmurl;
}
/**
* Look up the center record for the given cluster id (e.g. "K1")
*/
public DmRecord getUrlCode(String cluster)
{
//With the typed map, the lookup already yields a DmRecord (or null)
return urlMap.get(cluster);
}
}
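//A minimal standalone sketch (illustration only, not used by the job) of the convergence
//test in isFinished(): the sum of squared coordinate differences between the old and new
//centers is compared to the threshold, without taking a square root. The sample center
//values are made up for the example.
class ConvergenceDemo {
    public static void main(String[] args) {
        double[][] oldCenters = { {1.0, 2.0}, {5.0, 6.0} };
        double[][] newCenters = { {1.1, 2.1}, {5.0, 5.8} };
        double distance = 0;
        for (int i = 0; i < oldCenters.length; i++) {
            for (int j = 0; j < oldCenters[i].length; j++) {
                distance += Math.pow(oldCenters[i][j] - newCenters[i][j], 2);
            }
        }
        // 0.01 + 0.01 + 0.0 + 0.04 = 0.06, which is below a threshold of 1.0
        System.out.println(distance < 1.0); // prints true
    }
}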
public class MapperClass extends Mapper<Object, Text, Text, Text> {
private DmRecordParser drp=new DmRecordParser() ;
private DmRecord record0 = new DmRecord();
private DmRecord record1 = new DmRecord();
//These must not be fields initialized here: they have to be reset on every call to map(),
//so they are declared inside map() instead (a field initializer runs only once).
//private double Min_distance = 9999;
//private int tmpK = 0;
private Text tKey = new Text();
//Load the cluster-center coordinates.
//setup() runs automatically once, before any call to map(), acting as an initializer.
protected void setup(Context context
) throws IOException, InterruptedException
{
try {
drp.initialize("hdfs://hadoop:9000/hxy/conf.txt");
} catch (IOException e) {
throw new RuntimeException(e);
}
}
//The map output written via context.write is automatically sorted by key, even if no reduce function follows
protected void map(Object key,Text value,Context context) throws IOException,InterruptedException
{
String str=value.toString();
StringTokenizer stringTokenizer =new StringTokenizer(str);
String pname=stringTokenizer.nextToken();  //point name
String xpoint=stringTokenizer.nextToken(); //x coordinate
String ypoint=stringTokenizer.nextToken(); //y coordinate
record1.setName(pname);
record1.setXpoint(Double.parseDouble(xpoint));
record1.setYpoint(Double.parseDouble(ypoint));
double Min_distance = 9999;
int tmpK = 0;
for(int i=1; i <= 2; i++)
{
record0 = drp.getUrlCode("K"+i);
if(record0.distance(record1) < Min_distance){
tmpK = i;
Min_distance = record0.distance(record1);
}
}
tKey.set("C"+tmpK);
context.write(tKey, value);
}
}
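To make the assignment step in map() easier to follow in isolation, here is a small sketch that mirrors its logic without any Hadoop classes: it parses one "name x y" line, finds the nearest of two centers, and builds the C-prefixed output key. The class name, the sample centers, and the sample point are invented for the illustration; the only assumption is that the file lives in the same Kmeans package so that DmRecord is visible.

package Kmeans;
public class NearestCenterDemo {
    public static void main(String[] args) {
        // Hypothetical centers standing in for K1 and K2 read from conf.txt
        DmRecord[] centers = { new DmRecord("K1", 1.0, 1.0), new DmRecord("K2", 8.0, 8.0) };
        String line = "A 2 3"; // one input record: name, x, y
        String[] f = line.split(" ");
        DmRecord point = new DmRecord(f[0], Double.parseDouble(f[1]), Double.parseDouble(f[2]));
        double min = Double.MAX_VALUE;
        int nearest = 0;
        for (int i = 0; i < centers.length; i++) {
            double d = centers[i].distance(point);
            if (d < min) { min = d; nearest = i + 1; }
        }
        // Same key/value shape as the mapper's context.write(tKey, value)
        System.out.println("C" + nearest + "\t" + line); // prints C1<tab>A 2 3
    }
}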
ReducerClass.java file
package Kmeans;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class ReducerClass extends Reducer<Text, Text, Text, Text> {
    private Text tKey = new Text();
    private Text tValue = new Text();

    protected void reduce(Text key, Iterable<Text> value, Context context)
            throws IOException, InterruptedException {
        double avgX = 0;
        double avgY = 0;
        double sumX = 0;
        double sumY = 0;
        int count = 0;
        String[] strValue = null;
        for (Text v : value) {
            count++;
            // Split out one point, e.g. "A 2 3"
            strValue = v.toString().replace("\t", " ").split(" ");
            sumX = sumX + Double.parseDouble(strValue[1]); // x coordinate
            sumY = sumY + Double.parseDouble(strValue[2]); // y coordinate
        }
        avgX = sumX / count;
        avgY = sumY / count;
        // Turn key "C1"/"C2" back into the center id "K1"/"K2"
        tKey.set("K" + key.toString().substring(1, 2));
        tValue.set(avgX + "\t" + avgY);
        System.out.println("K" + key.toString().substring(1, 2) + "\t" + avgX + "\t" + avgY);
        context.write(tKey, tValue);
    }
}
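For reference, the averaging that reduce() performs for a single cluster key can be reproduced in plain Java. The sketch below is only an illustration with made-up points and does not touch Hadoop:

public class CenterAverageDemo {
    public static void main(String[] args) {
        // Points that the mapper assigned to one cluster, e.g. key "C1"
        String[] points = { "A 2 3", "B 4 5", "C 6 7" };
        double sumX = 0, sumY = 0;
        for (String p : points) {
            String[] f = p.split(" ");
            sumX += Double.parseDouble(f[1]);
            sumY += Double.parseDouble(f[2]);
        }
        double avgX = sumX / points.length; // (2+4+6)/3 = 4.0
        double avgY = sumY / points.length; // (3+5+7)/3 = 5.0
        // Emitted in the same "K<n>  x  y" shape the reducer writes
        System.out.println("K1\t" + avgX + "\t" + avgY);
    }
}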
Kmeans.java file
package Kmeans;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Kmeans {
    public static void main(String[] args) throws Exception {
        int repeated = 0;
        // Keep submitting MapReduce jobs until the distance between the cluster centers of two
        // consecutive iterations drops below the threshold, or the iteration limit is reached
        do {
            // Class that handles the configuration files
            Configuration conf = new Configuration();
            // Argument checking (arguably optional)
            String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
            if (otherArgs.length != 2) {
                System.err.println("Usage: kmeans <in> <out>");
                System.exit(2);
            }
            // Create the job
            Job job = new Job(conf, "rate");
            // Required when the job is packaged as a jar
            job.setJarByClass(Kmeans.class);
            job.setMapperClass(MapperClass.class);
            // Set the custom Reducer class
            job.setReducerClass(ReducerClass.class);
            // Output key type (applies to both the Mapper and the Reducer output)
            job.setOutputKeyClass(Text.class);
            // Output value type (applies to both the Mapper and the Reducer output)
            job.setOutputValueClass(Text.class);
            // Job input path
            FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
            Path out = new Path(otherArgs[1]);
            FileSystem fs = FileSystem.get(conf);
            // The outputkmeans directory already exists after the previous run, so delete it first
            if (fs.exists(out)) {
                fs.delete(out, true);
            }
            // Job output path
            FileOutputFormat.setOutputPath(job, out);
            job.waitForCompletion(true); // run the job
            ++repeated;
        } while (repeated < 10 && !DmRecordParser.isFinished("hdfs://hadoop:9000/hxy/conf.txt",
                "hdfs://hadoop:9000/hxy/outputkmeans/part-r-00000", 1.0));
        // Cluster the data set using the final cluster centers
        Cluster(args);
    }

    public static void Cluster(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        // Argument checking (arguably optional)
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: kmeans <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "rate");
        job.setJarByClass(Kmeans.class);
        // Only the map phase is needed here; no further reduce
        job.setMapperClass(MapperClass.class);
        // Output key type
        job.setOutputKeyClass(Text.class);
        // Output value type
        job.setOutputValueClass(Text.class);
        // Job input path
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        Path out = new Path(otherArgs[1]);
        FileSystem fs = FileSystem.get(conf);
        // The output directory already exists after the previous run, so delete it first
        if (fs.exists(out)) {
            fs.delete(out, true);
        }
        // Job output path
        FileOutputFormat.setOutputPath(job, out);
        job.waitForCompletion(true); // run the job
    }
}
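Based on how parse(), map(), and the reducer tokenize their input, both the center file conf.txt and the data file are expected to hold one record per line, with fields separated by a single space (or a tab). The concrete values below are only placeholders to show the shape of the files, not data from the original run:

conf.txt (initial cluster centers, two clusters K1 and K2):
K1 1.0 1.0
K2 8.0 8.0

Input data (one point per line: name, x coordinate, y coordinate):
A 2 3
B 4 5
C 6 7

After each iteration the reducer writes the new centers as "K1<tab>x<tab>y" lines to part-r-00000, which isFinished() compares against conf.txt and then copies back over it.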