Our analysis starts from org.apache.mahout.clustering.syntheticcontrol.kmeans.Job.run(Configuration conf, Path input, Path output, DistanceMeasure measure, int k, double convergenceDelta, int maxIterations):
public static void run(Configuration conf, Path input, Path output, DistanceMeasure measure, int k, double convergenceDelta, int maxIterations) throws Exception {
  Path directoryContainingConvertedInput = new Path(output, DIRECTORY_CONTAINING_CONVERTED_INPUT);
  log.info("Preparing Input");
  InputDriver.runJob(input, directoryContainingConvertedInput, "org.apache.mahout.math.RandomAccessSparseVector");
  log.info("Running random seed to get initial clusters");
  Path clusters = new Path(output, Cluster.INITIAL_CLUSTERS_DIR);
  clusters = RandomSeedGenerator.buildRandom(conf, directoryContainingConvertedInput, clusters, k, measure);
  log.info("Running KMeans");
  KMeansDriver.run(conf, directoryContainingConvertedInput, clusters, output, measure, convergenceDelta, maxIterations, true, false);
  // run ClusterDumper
  ClusterDumper clusterDumper = new ClusterDumper(finalClusterPath(conf, output, maxIterations), new Path(output, "clusteredPoints"));
  clusterDumper.printClusters(null);
}
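Before stepping through it, here is a minimal sketch of how this entry point could be invoked programmatically; the paths and parameter values (k = 6, delta = 0.5, 10 iterations) are illustrative assumptions, only the method signature comes from the code above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.mahout.clustering.syntheticcontrol.kmeans.Job;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;

public class RunSyntheticKMeans {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path input = new Path("testdata");  // hypothetical directory holding the raw control-chart data
    Path output = new Path("output");   // hypothetical output directory
    // k = 6 clusters, convergence delta 0.5, at most 10 iterations -- illustrative values only
    Job.run(conf, input, output, new EuclideanDistanceMeasure(), 6, 0.5, 10);
  }
}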
log.info("Preparing Input");
InputDriver.runJob(input, directoryContainingConvertedInput,"org.apache.mahout.math.RandomAccessSparseVector");
log.info("Running random seed to get initial clusters");
Path clusters = new Path(output, Cluster.INITIAL_CLUSTERS_DIR);
clusters = RandomSeedGenerator.buildRandom(conf,directoryContainingConvertedInput, clusters, k, measure);
3. Finally, let us analyze the core part of the k-means algorithm:
log.info("Running KMeans");
KMeansDriver.run(conf, directoryContainingConvertedInput, clusters,output, measure, convergenceDelta, maxIterations, true, false);
Path clustersOut = buildClusters(conf, input, clustersIn, output,measure, maxIterations, delta, runSequential);
if (runClustering) {
log.info("Clustering data");
clusterData(conf, input, clustersOut, new Path(output,AbstractCluster.CLUSTERED_POINTS_DIR), measure, delta,runSequential);
}
public static Path buildClusters(Configuration conf, Path input, Path clustersIn, Path output, DistanceMeasure measure, int maxIterations, String delta, boolean runSequential)
    throws IOException, InterruptedException, ClassNotFoundException {
  if (runSequential) {
    return buildClustersSeq(conf, input, clustersIn, output, measure, maxIterations, delta);
  } else {
    return buildClustersMR(conf, input, clustersIn, output, measure, maxIterations, delta);
  }
}
private static Path buildClustersMR(Configuration conf, Path input, Path clustersIn, Path output, DistanceMeasure measure, int maxIterations, String delta)
    throws IOException, InterruptedException, ClassNotFoundException {
  boolean converged = false;
  int iteration = 1;
  while (!converged && iteration <= maxIterations) {
    log.info("K-Means Iteration {}", iteration);
    // point the output to a new directory per iteration
    Path clustersOut = new Path(output, AbstractCluster.CLUSTERS_DIR + iteration);
    converged = runIteration(conf, input, clustersIn, clustersOut, measure.getClass().getName(), delta);
    // now point the input to the old output directory
    clustersIn = clustersOut;
    iteration++;
  }
  Path finalClustersIn = new Path(output, AbstractCluster.CLUSTERS_DIR + (iteration - 1) + "-final");
  FileSystem.get(conf).rename(new Path(output, AbstractCluster.CLUSTERS_DIR + (iteration - 1)), finalClustersIn);
  return finalClustersIn;
}
The files produced by each iteration are written to a separate directory (output/clusters-i). The runIteration() method launches a MapReduce job that assigns every point in the input file to a cluster and updates the cluster's statistics:
private static boolean runIteration(Configuration conf, Path input, Path clustersIn, Path clustersOut, String measureClass, String convergenceDelta)
    throws IOException, InterruptedException, ClassNotFoundException {
  conf.set(KMeansConfigKeys.CLUSTER_PATH_KEY, clustersIn.toString());
  conf.set(KMeansConfigKeys.DISTANCE_MEASURE_KEY, measureClass);
  conf.set(KMeansConfigKeys.CLUSTER_CONVERGENCE_KEY, convergenceDelta);
  Job job = new Job(conf, "KMeans Driver running runIteration over clustersIn: " + clustersIn);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(ClusterObservations.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Cluster.class);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setMapperClass(KMeansMapper.class);
  job.setCombinerClass(KMeansCombiner.class);
  job.setReducerClass(KMeansReducer.class);
  FileInputFormat.addInputPath(job, input);
  FileOutputFormat.setOutputPath(job, clustersOut);
  job.setJarByClass(KMeansDriver.class);
  HadoopUtil.delete(conf, clustersOut);
  if (!job.waitForCompletion(true)) {
    throw new InterruptedException("K-Means Iteration failed processing " + clustersIn);
  }
  FileSystem fs = FileSystem.get(clustersOut.toUri(), conf);
  return isConverged(clustersOut, conf, fs);
}
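To visualize the per-iteration directories mentioned above (output/clusters-i), here is a small, purely illustrative sketch; the "output" path and this standalone listing code are assumptions, not part of Mahout:

// List the directories produced under the job's output path.
// Typical contents after a run: the initial seed directory (Cluster.INITIAL_CLUSTERS_DIR),
// clusters-1, clusters-2, ..., the renamed "-final" directory of the last iteration,
// and later clusteredPoints written by clusterData().
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
for (FileStatus status : fs.listStatus(new Path("output"))) { // "output" is a hypothetical path
  System.out.println(status.getPath().getName());
}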
The mapper of this job is KMeansMapper. In its setup() method it instantiates the DistanceMeasure and a KMeansClusterer, and loads the clusters of the previous iteration (or the initial seeds) from the path stored under KMeansConfigKeys.CLUSTER_PATH_KEY:
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  Configuration conf = context.getConfiguration();
  DistanceMeasure measure = ClassUtils.instantiateAs(conf.get(KMeansConfigKeys.DISTANCE_MEASURE_KEY), DistanceMeasure.class);
  measure.configure(conf);
  this.clusterer = new KMeansClusterer(measure);
  String clusterPath = conf.get(KMeansConfigKeys.CLUSTER_PATH_KEY);
  if (clusterPath != null && !clusterPath.isEmpty()) {
    KMeansUtil.configureWithClusterInfo(conf, new Path(clusterPath), clusters);
    if (clusters.isEmpty()) {
      throw new IllegalStateException("No clusters found. Check your -c path.");
    }
  }
}
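The map() method itself is not quoted here; it essentially just forwards every input vector to the clusterer. Roughly along these lines (a sketch, the exact signature may differ between Mahout versions):

@Override
protected void map(WritableComparable<?> key, VectorWritable point, Context context)
    throws IOException, InterruptedException {
  // delegate the assignment of this point to KMeansClusterer
  this.clusterer.emitPointToNearestCluster(point.get(), this.clusters, context);
}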
KMeansClusterer.emitPointToNearestCluster() computes the distance from the point to every cluster center, picks the nearest cluster, and emits the point as a ClusterObservations keyed by that cluster's identifier:
public void emitPointToNearestCluster(Vector point, Iterable<Cluster> clusters, Mapper<?, ?, Text, ClusterObservations>.Context context)
    throws IOException, InterruptedException {
  Cluster nearestCluster = null;
  double nearestDistance = Double.MAX_VALUE;
  for (Cluster cluster : clusters) {
    Vector clusterCenter = cluster.getCenter();
    double distance = this.measure.distance(clusterCenter.getLengthSquared(), clusterCenter, point);
    if (log.isDebugEnabled()) {
      log.debug("{} Cluster: {}", distance, cluster.getId());
    }
    if (distance < nearestDistance || nearestCluster == null) {
      nearestCluster = cluster;
      nearestDistance = distance;
    }
  }
  context.write(new Text(nearestCluster.getIdentifier()), new ClusterObservations(1, point, point.times(point)));
}
The combiner, KMeansCombiner, merges the partial ClusterObservations emitted for the same cluster on the map side:
protected void reduce(Text key, Iterable<ClusterObservations> values, Context context)
    throws IOException, InterruptedException {
  Cluster cluster = new Cluster();
  for (ClusterObservations value : values) {
    cluster.observe(value);
  }
  context.write(key, cluster.getObservations());
}
The reducer, KMeansReducer, accumulates all observations for a cluster, checks convergence, and recomputes the cluster's parameters:
protected void reduce(Text key, Iterable<ClusterObservations> values, Context context)
    throws IOException, InterruptedException {
  Cluster cluster = clusterMap.get(key.toString());
  for (ClusterObservations delta : values) {
    cluster.observe(delta);
  }
  // force convergence calculation
  boolean converged = clusterer.computeConvergence(cluster, convergenceDelta);
  if (converged) {
    context.getCounter("Clustering", "Converged Clusters").increment(1);
  }
  cluster.computeParameters();
  context.write(new Text(cluster.getIdentifier()), cluster);
}
In this reduce() method the key is used to look up the corresponding Cluster in clusterMap. All key/value pairs sharing the same key belong to a single cluster, so iterating over the values for that key visits every point of the cluster. For each value (a ClusterObservations object) cluster.observe(value) is called; this adds the value's S0 to the cluster's S0, its S1 to the cluster's S1, and its S2 to the cluster's S2. Initially each cluster has S0 = 0, S1 = null and S2 = null, as can be seen from cluster.computeParameters(). My understanding is that S0 is the number of points in the cluster, S1 is the vector obtained by summing, component by component, the VectorWritable of every point in the cluster, and S2 is the vector obtained by summing the squares of those components.
After all values for a key have been processed, the cluster's S0 (initially 0), S1 (initially null) and S2 (initially null) hold new values, which are used to decide whether the cluster has converged: clusterer.computeConvergence(cluster, convergenceDelta). If it has converged, the counter of converged clusters is incremented by 1. Then computeParameters() is called to compute the cluster's remaining attributes, namely numPoints, center and radius, and at the same time S0 is reset to 0 and S1 and S2 to null:
public void computeParameters() {
  if (getS0() == 0) {
    return;
  }
  setNumPoints((int) getS0());
  setCenter(getS1().divide(getS0()));
  // compute the component stds
  if (getS0() > 1) {
    setRadius(getS2().times(getS0()).minus(getS1().times(getS1())).assign(new SquareRootFunction()).divide(getS0()));
  }
  setS0(0);
  setS1(null);
  setS2(null);
}
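To make the roles of S0, S1 and S2 concrete, here is a small worked sketch with two 2-dimensional points, using plain Java arrays instead of Mahout Vectors (illustrative only):

// Two points assigned to the same cluster: p1 = (1, 2), p2 = (3, 6)
double[] p1 = {1, 2};
double[] p2 = {3, 6};

// After cluster.observe() has seen both points:
double s0 = 2;                                          // S0: number of points
double[] s1 = {p1[0] + p2[0], p1[1] + p2[1]};           // S1: component-wise sum            -> (4, 8)
double[] s2 = {p1[0] * p1[0] + p2[0] * p2[0],           // S2: component-wise sum of squares -> (10, 40)
               p1[1] * p1[1] + p2[1] * p2[1]};

// computeParameters() then derives:
double[] center = {s1[0] / s0, s1[1] / s0};             // (2, 4)
double[] radius = {                                     // per-component std: sqrt(S0*S2 - S1*S1) / S0
    Math.sqrt(s0 * s2[0] - s1[0] * s1[0]) / s0,         // sqrt(2*10 - 16) / 2 = 1
    Math.sqrt(s0 * s2[1] - s1[1] * s1[1]) / s0          // sqrt(2*40 - 64) / 2 = 2
};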
Finally the reducer writes each cluster out. These are the fields serialized by Cluster.write():
out.writeUTF(measure.getClass().getName());
out.writeInt(id);
out.writeLong(getNumPoints());
VectorWritable.writeVector(out, getCenter());
VectorWritable.writeVector(out, getRadius());
out.writeBoolean(converged);
As you can see, the fields written are the DistanceMeasure (its class name), the cluster id, the number of points, the center, the radius, and whether the cluster has converged.
Returning to runIteration(): the last step is to check whether, after this iteration, all clusters have converged. When the MapReduce job finishes, the result of the iteration (the k clusters) has been written to the output directory clustersOut (output/clusters-i). isConverged() reads the files in that directory (if there are multiple reducers, the clusters are spread across several part files), examines each Cluster, and as soon as one of them has not converged it returns false, meaning global convergence has not been reached and another iteration is required.
private static boolean isConverged(Path filePath, Configuration conf, FileSystem fs) throws IOException {
  for (FileStatus part : fs.listStatus(filePath, PathFilters.partFilter())) {
    SequenceFileValueIterator<Cluster> iterator = new SequenceFileValueIterator<Cluster>(part.getPath(), true, conf);
    while (iterator.hasNext()) {
      Cluster value = iterator.next();
      if (!value.isConverged()) {
        Closeables.closeQuietly(iterator);
        return false;
      }
    }
  }
  return true;
}
Back in buildClustersMR(), once every cluster has converged (or maxIterations has been reached), the directory of the last iteration is renamed with a "-final" suffix and returned:
Path finalClustersIn = new Path(output, AbstractCluster.CLUSTERS_DIR + (iteration - 1) + "-final");
FileSystem.get(conf).rename(new Path(output, AbstractCluster.CLUSTERS_DIR + (iteration - 1)), finalClustersIn);
return finalClustersIn;
Control then returns to KMeansDriver.run(). If runClustering is true, the data is assigned to the final clusters:
if (runClustering) {
  log.info("Clustering data");
  clusterData(conf, input, clustersOut, new Path(output, AbstractCluster.CLUSTERED_POINTS_DIR), measure, delta, runSequential);
}
In the MapReduce case, clusterData() calls clusterDataMR() to launch another MapReduce job which, based on the final clustersOut directory (holding the information of every final Cluster) and the converted input data, decides which points belong to each Cluster. The result is written to the output/clusteredPoints directory. At this point KMeansDriver.run() is finished.
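As an illustration of how that output could be inspected, here is a minimal sketch; it assumes (as in Mahout 0.5/0.6) that output/clusteredPoints stores an IntWritable cluster id as the key and a WeightedVectorWritable point as the value, and the part file name below is hypothetical:

Configuration conf = new Configuration();
Path clusteredPoints = new Path("output/clusteredPoints/part-m-00000"); // hypothetical part file name
FileSystem fs = FileSystem.get(conf);
SequenceFile.Reader reader = new SequenceFile.Reader(fs, clusteredPoints, conf);
IntWritable clusterId = new IntWritable();
WeightedVectorWritable point = new WeightedVectorWritable();
while (reader.next(clusterId, point)) {
  // each record maps one input point to the id of the cluster it was assigned to
  System.out.println(clusterId.get() + ": " + point.getVector());
}
reader.close();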
Finally, Job.run() uses ClusterDumper to print the concrete clustering result (which points belong to each Cluster, the number of points, the center, the radius, and so on).