BFS(G, s)
  for each vertex u ∈ V[G] - {s}
      do color[u] ← WHITE
         d[u] ← ∞
         π[u] ← NIL
  // Lines 1-4: for every vertex u except the source s, paint u white, set
  // d[u] to infinity, and set u's parent to NIL.
  color[s] ← GRAY
  // Line 8: paint the source vertex s gray, because the source counts as
  // already discovered when the procedure starts.
  d[s] ← 0   // initialize d[s] to 0
  π[s] ← NIL // the source vertex has no parent, so set it to NIL
  Q ← Ø
  ENQUEUE(Q, s) // enqueue
  // Lines 12-13: initialize the queue Q so that it contains only the source s.
  while Q ≠ Ø
      do u ← DEQUEUE(Q) // dequeue
         // Line 16: take the gray vertex u at the head of queue Q and remove it from Q.
         for each v ∈ Adj[u] // the for loop examines every vertex v in u's adjacency list
             do if color[v] = WHITE
                   then color[v] ← GRAY // v has just been discovered
                        d[v] ← d[u] + 1 // its distance is d[u] + 1
                        π[v] ← u        // u is recorded as v's parent
                        ENQUEUE(Q, v)   // add v to the queue
         color[u] ← BLACK // all of u's neighbors have been examined, so u turns black
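To make the pseudocode concrete before moving to MapReduce, here is a direct serial Java translation (a minimal sketch that assumes the graph is given as adjacency lists, one int[] of neighbor indices per vertex; it is illustration only, not part of the MapReduce code below). Each pass of the while loop expands the frontier by one level, which is exactly the unit of work a single MapReduce iteration performs later.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

public class SerialBFS {
  static final int WHITE = 0, GRAY = 1, BLACK = 2;

  // adj.get(u) holds the neighbors of vertex u; s is the source vertex
  static int[] bfs(List<int[]> adj, int s) {
    int n = adj.size();
    int[] color = new int[n];            // every vertex starts WHITE (0)
    int[] d = new int[n];                // d[u]: distance from the source
    int[] pi = new int[n];               // pi[u]: parent of u in the BFS tree
    Arrays.fill(d, Integer.MAX_VALUE);   // d[u] <- infinity
    Arrays.fill(pi, -1);                 // pi[u] <- NIL
    color[s] = GRAY;                     // the source is discovered up front
    d[s] = 0;
    Deque<Integer> q = new ArrayDeque<Integer>();
    q.add(s);
    while (!q.isEmpty()) {
      int u = q.remove();                // gray vertex at the head of Q
      for (int v : adj.get(u)) {
        if (color[v] == WHITE) {         // v is discovered for the first time
          color[v] = GRAY;
          d[v] = d[u] + 1;
          pi[v] = u;
          q.add(v);
        }
      }
      color[u] = BLACK;                  // u's adjacency list is exhausted
    }
    return d;
  }
}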
Node.java
package com.joey.mapred.graph.utils;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.Text;
public class Node {
/**
* three possible colors a node can have to keep track
* of the visiting status of the nodes during graph search
*/
public static enum Color {
WHITE, // unvisited
GRAY,  // visited, but not yet processed
BLACK  // processed
};
private String id; // id of the node
private int distance; // distance of the node from source node
// list of the edges
private List<String> edges = new ArrayList<String>();
private Color color = Color.WHITE;
// parent/ predecessor of the node
// The parent of the source is marked "source" to leave it unchanged
private String parent;
public Node() {
distance = Integer.MAX_VALUE;
color = Color.WHITE;
parent = null;
}
public Node(String nodeInfo) {
String[] inputVal = nodeInfo.split("\t");
String key = "";
String val = "";
try {
key = inputVal[0]; // node id
// the list of adjacent nodes, distance, color, parent
val = inputVal[1];
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
String[] tokens = val.split("\\|");
this.id = key;
for (String s : tokens[0].split(",")) {
if (s.length() > 0) edges.add(s);
}
if (tokens[1].equalsIgnoreCase("Integer.MAX_VALUE")) {
this.distance = Integer.MAX_VALUE;
} else {
this.distance = Integer.parseInt(tokens[1]);
}
this.color = Color.valueOf(tokens[2]);
this.parent = tokens[3];
}
public Text getNodeInfo() {
StringBuilder sb = new StringBuilder();
for (String v : edges) {
sb.append(v).append(",");
}
sb.append("|");
if (this.distance < Integer.MAX_VALUE) {
sb.append(this.distance).append("|");
} else {
sb.append("Integer.MAX_VALUE").append("|");
}
sb.append(color.toString()).append("|");
sb.append(getParent());
return new Text(sb.toString());
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public int getDistance() {
return distance;
}
public void setDistance(int distance) {
this.distance = distance;
}
public List<String> getEdges() {
return edges;
}
public void setEdges(List<String> edges) {
this.edges = edges;
}
public Color getColor() {
return color;
}
public void setColor(Color color) {
this.color = color;
}
public String getParent() {
return parent;
}
public void setParent(String parent) {
this.parent = parent;
}
}
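For reference, a node with id 1, neighbors 2 and 3, distance 0, color GRAY and parent "source" round-trips through this class as the line below, where a real tab separates the id from the rest (the concrete values are illustrative):

1	2,3,|0|GRAY|source

Note the trailing comma after the edge list: getNodeInfo() appends a comma after every edge, and the constructor's s.length() > 0 check skips the empty token this produces when parsing.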
TraverseGraph.java
package com.joey.mapred.graph;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import com.joey.mapred.graph.utils.Node;
import com.joey.mapred.graph.utils.Node.Color;
public class TraverseGraph {
/**
*
* Description : Mapper class that implements the map part of the breadth-
* first search algorithm. Nodes colored WHITE or BLACK are emitted as they
* are. For each GRAY node, a new GRAY node is emitted for every adjacent
* node, with the distance incremented by one; the original GRAY node is then
* colored BLACK and emitted as well.
*
* Input format : <node id> TAB <list_of_adjacent_nodes>|<distance_from_the_source>|<color>|<parent>
*
* Output format : the same layout, with distance, color and parent updated
* as the search front advances
* Reference :
* http://www.johnandcailin.com/blog/cailin/breadth-first-graph-search
* -using-iterative-map-reduce-algorithm
*
*/
// the type parameters are the input key type, the input value type, the
// output key type, and the output value type
public static class TraverseMapper extends Mapper<Object, Text, Text, Text> {
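    // NOTE: the original listing was truncated at this point. The method
    // bodies below are a sketch reconstructed from the referenced blog's
    // algorithm and the calls made by the driver classes further down; they
    // are not the author's exact code.
    public void map(Object key, Text value, Context context, Node inNode)
        throws IOException, InterruptedException {
      if (inNode.getColor() == Color.GRAY) {
        // emit every adjacent node as a newly discovered GRAY node whose
        // distance is one more than this node's and whose parent is this node
        for (String neighbor : inNode.getEdges()) {
          Node adjacentNode = new Node();
          adjacentNode.setId(neighbor);
          adjacentNode.setDistance(inNode.getDistance() + 1);
          adjacentNode.setColor(Color.GRAY);
          adjacentNode.setParent(inNode.getId());
          context.write(new Text(adjacentNode.getId()), adjacentNode.getNodeInfo());
        }
        // this node has now been processed, so darken it to BLACK
        inNode.setColor(Color.BLACK);
      }
      // emit the (possibly re-colored) node itself; WHITE and BLACK nodes
      // pass through unchanged
      context.write(new Text(inNode.getId()), inNode.getNodeInfo());
    }
  }

  /**
   * Description : Reducer class that collapses all the rows emitted for one
   * node id into a single node, keeping the full adjacency list, the minimum
   * distance, the darkest color and the parent along the shortest path.
   */
  public static class TraverseReducer extends Reducer<Text, Text, Text, Text> {
    public Node reduce(Text key, Iterable<Text> values, Context context, Node outNode)
        throws IOException, InterruptedException {
      outNode.setId(key.toString());
      for (Text value : values) {
        Node inNode = new Node(key.toString() + "\t" + value.toString());
        // only the row that originated from the node itself carries edges
        if (inNode.getEdges().size() > 0) {
          outNode.setEdges(inNode.getEdges());
        }
        // keep the smallest distance seen and the parent that produced it
        if (inNode.getDistance() < outNode.getDistance()) {
          outNode.setDistance(inNode.getDistance());
          outNode.setParent(inNode.getParent());
        }
        // keep the darkest color (WHITE < GRAY < BLACK in declaration order)
        if (inNode.getColor().ordinal() > outNode.getColor().ordinal()) {
          outNode.setColor(inNode.getColor());
        }
      }
      context.write(key, outNode.getNodeInfo());
      return outNode;
    }
  }
}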
Driver.java
This class is comparatively simple, so it needs little extra explanation.
package com.joey.mapred.graph;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ToolRunner;
import com.joey.mapred.BaseDriver;
import com.joey.mapred.graph.TraverseGraph.TraverseMapper;
import com.joey.mapred.graph.TraverseGraph.TraverseReducer;
import com.joey.mapred.graph.utils.Node;
/**
* Description : MapReduce program to solve the single-source shortest path
* problem using parallel breadth-first search. This program also illustrates
* how to perform iterative map-reduce.
*
* The single source shortest path is implemented by using Breadth-first search
* concept.
*
* Reference :
* http://www.johnandcailin.com/blog/cailin/breadth-first-graph-search
* -using-iterative-map-reduce-algorithm
*
*/
public class BFSearchDriver extends BaseDriver {
static class SearchMapperSSSP extends TraverseMapper {
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
Node inNode = new Node(value.toString());
// calls the map method of the super class SearchMapper
super.map(key, value, context, inNode);
}
}
static class SearchReducerSSSP extends TraverseReducer {
// the parameters are the types of the input key, the values associated with
// the key and the Context object through which the Reducer communicates
// with the Hadoop framework
public void reduce(Text key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
// create a new out node and set its values
Node outNode = new Node();
// call the reduce method of SearchReducer class
outNode = super.reduce(key, values, context, outNode);
// if the color of the node is gray, the execution has to continue, this
// is done by incrementing the counter
if (outNode.getColor() == Node.Color.GRAY)
context.getCounter(MoreIterations.numberOfIterations).increment(1L);
}
}
public int run(String[] args) throws Exception {
int iterationCount = 0; // counter to set the ordinal number of the
// intermediate outputs
Job job;
long terminationValue = 1;
// while there are more gray nodes to process
while (terminationValue > 0) {
job = getJobConf(args); // get the job configuration
String input, output;
// setting the input file and output file for each iteration
// during the first time the user-specified file will be the input whereas
// for the subsequent iterations
// the output of the previous iteration will be the input
if (iterationCount == 0) {
// for the first iteration the input will be the first input argument
input = args[0];
} else {
// for the remaining iterations, the input will be the output of the
// previous iteration
input = args[1] + iterationCount;
}
output = args[1] + (iterationCount + 1); // setting the output file
FileInputFormat.setInputPaths(job, new Path(input));
FileOutputFormat.setOutputPath(job, new Path(output));
job.waitForCompletion(true);
Counters jobCntrs = job.getCounters();
terminationValue = jobCntrs
.findCounter(MoreIterations.numberOfIterations).getValue();
iterationCount++;
}
return 0;
}
static enum MoreIterations {
numberOfIterations
}
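// The listing above omits the getJobConf implementation that BaseDriver
// declares abstract. A minimal sketch of what it could look like, wiring up
// the SSSP mapper/reducer with plain text I/O (this anonymous JobInfo is an
// assumption, not the author's code):
protected Job getJobConf(String[] args) throws Exception {
  JobInfo jobInfo = new JobInfo() {
    public Class<?> getJarByClass() { return BFSearchDriver.class; }
    public Class<? extends Mapper> getMapperClass() { return SearchMapperSSSP.class; }
    public Class<? extends Reducer> getCombinerClass() { return null; } // no combiner
    public Class<? extends Reducer> getReducerClass() { return SearchReducerSSSP.class; }
    public Class<?> getOutputKeyClass() { return Text.class; }
    public Class<?> getOutputValueClass() { return Text.class; }
    public Class<? extends InputFormat> getInputFormatClass() { return TextInputFormat.class; }
    public Class<? extends OutputFormat> getOutputFormatClass() { return TextOutputFormat.class; }
  };
  return setupJob("BFSearchSSSP", jobInfo);
}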
public static void main(String[] args) throws Exception {
  // validate the arguments before launching the job (the original listing
  // checked them after running and was truncated mid-message; the usage
  // string is completed here with the two arguments run() actually expects)
  if (args.length != 2) {
    System.err.println("Usage: BFSearchDriver <input path> <output path prefix>");
    System.exit(2);
  }
  int res = ToolRunner.run(new Configuration(), new BFSearchDriver(), args);
  System.exit(res);
}
}
BaseDriver.java
package com.joey.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.Tool;
public abstract class BaseDriver extends Configured implements Tool {
// method to set the configuration for the job and
// the mapper and the reducer classes
protected Job setupJob(String jobName, JobInfo jobInfo) throws Exception {
Configuration conf = getConf();
if (conf == null) {
throw new RuntimeException("Configuration should not be null");
}
Job job = new Job(conf, jobName);
// set the several classes
job.setJarByClass(jobInfo.getJarByClass());
// set the mapper class
job.setMapperClass(jobInfo.getMapperClass());
// the combiner class is optional, so set it only if it
// is required by the program
if (jobInfo.getCombinerClass() != null)
job.setCombinerClass(jobInfo.getCombinerClass());
// set the reducer class
job.setReducerClass(jobInfo.getReducerClass());
// the number of reducers is set to 3, this can be
// altered according to the program's requirements
job.setNumReduceTasks(3);
// set the type of the output key and value for the
// Map & Reduce functions
job.setOutputKeyClass(jobInfo.getOutputKeyClass());
job.setOutputValueClass(jobInfo.getOutputValueClass());
if (jobInfo.getInputFormatClass() != null)
job.setInputFormatClass(jobInfo.getInputFormatClass());
if (jobInfo.getOutputFormatClass() != null)
job.setOutputFormatClass(jobInfo.getOutputFormatClass());
return job;
}
protected abstract Job getJobConf(String[] args) throws Exception;
protected abstract class JobInfo {
public abstract Class<?> getJarByClass();
public abstract Class<? extends Mapper> getMapperClass();
public abstract Class<? extends Reducer> getCombinerClass();
public abstract Class<? extends Reducer> getReducerClass();
public abstract Class<?> getOutputKeyClass();
public abstract Class<?> getOutputValueClass();
public abstract Class<? extends InputFormat> getInputFormatClass();
public abstract Class<? extends OutputFormat> getOutputFormatClass();
}
}