setup()
在任务开始时调用一次
map()
对InputSplit中的每一个键值对调用一次。大多数应用程序应该覆盖它,但默认实现是identity函数(原样传递键值对)
// Called once for every key/value pair in the InputSplit. Most applications
// should override this; the default implementation is the identity function,
// forwarding each input pair unchanged.
protected void map(KEYIN key, VALUEIN value,
Context context) throws IOException, InterruptedException {
context.write((KEYOUT) key, (VALUEOUT) value);// write out exactly what came in (unchecked casts: identity assumes KEYIN/KEYOUT are compatible)
}
cleanup()
任务结束时调用一次
run()
专家用户可以重写此方法,以便更完整地控制Mapper的执行过程
// Template method driving the whole map task: setup() once, then map() for
// every key/value pair in the split, then cleanup() once — the finally block
// guarantees cleanup even if map() throws. Expert users may override this
// for finer control over execution.
public void run(Context context) throws IOException, InterruptedException {
setup(context);
try {
// loop until the split is exhausted, advancing one record per iteration
while (context.nextKeyValue()) {
map(context.getCurrentKey(), context.getCurrentValue(), context);// fetch the current key and value and hand them to map()
}
} finally {
cleanup(context);
}
}
computeSplitSize(blockSize,minSize,maxSize)
Math.max(minSize,Math.min(maxSize,blockSize))
最终默认大小为 splitSize = 128MB(即 Math.max(minSize, Math.min(maxSize, blockSize)),通常等于块大小)
/**
 * Generate the list of files and make them into FileSplits.
 * 生成文件列表并将其封装为 FileSplit。
 * @param job the job context 作业上下文
 * @throws IOException 文件系统访问出错时抛出
 */
// Generates the list of input splits for the job: one split per splitSize
// chunk of each splitable file, or a single whole-file split when the file
// cannot be split (e.g. most compressed formats).
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = new StopWatch().start();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));// larger of the format minimum and the job-configured minimum
long maxSize = getMaxSplitSize(job);// job-configured maximum split size (defaults to Long.MAX_VALUE)
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();// result list returned to the caller
List<FileStatus> files = listStatus(job);// metadata for every input file
for (FileStatus file: files) {
Path path = file.getPath();// input path of this file
long length = file.getLen();// file length in bytes
if (length != 0) {
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
// block locations are already attached to the status object
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(job, path)) {
// file is splitable — most compressed formats are not (bzip2 is a notable exception)
long blockSize = file.getBlockSize();// HDFS block size, commonly 128 MB
long splitSize = computeSplitSize(blockSize, minSize, maxSize);// max(minSize, min(maxSize, blockSize)) — typically the block size
long bytesRemaining = length;// bytes of the file not yet assigned to a split
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
// carve full-size splits while the remainder exceeds SPLIT_SLOP (1.1) * splitSize
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);// block containing this split's start offset
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
// build the logical split carrying that block's hosts / cached hosts for locality
bytesRemaining -= splitSize;// advance to the next split and loop again
}
if (bytesRemaining != 0) {
// tail split: remainder no longer exceeds 1.1 * splitSize, emit it whole
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
}
} else {
// not splitable 不让切分 — e.g. a gzip file: one split covering the whole file
splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
blkLocations[0].getCachedHosts()));
}
} else {
// zero-length file: create a split with an empty host array
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;// the complete list of input splits
}
纯文本文件格式化类,以行进行分割,行的分割标识为回车或换行,继承了FileInputFormat抽象类,是FileInputFormat的实现类。
类说明:创建RecordReader,通过textinputformat.record.delimiter获取记录的默认分隔符,然后调用LineRecordReader(行记录读取器)
/**
 * Plain-text input format: keys are byte offsets ({@link LongWritable}),
 * values are lines ({@link Text}). Extends {@link FileInputFormat}.
 */
public class TextInputFormat extends FileInputFormat<LongWritable, Text> {

/**
 * Creates the record reader for a split: a {@link LineRecordReader} using
 * the delimiter configured under "textinputformat.record.delimiter", or the
 * reader's built-in default line handling when none is configured.
 */
@Override
public RecordReader<LongWritable, Text>
createRecordReader(InputSplit split,
TaskAttemptContext context) {
final String delimiter = context.getConfiguration().get(
"textinputformat.record.delimiter");
final byte[] delimiterBytes =
(delimiter == null) ? null : delimiter.getBytes(Charsets.UTF_8);
return new LineRecordReader(delimiterBytes);
}

/**
 * A file is splitable when it is uncompressed (no codec found), or when
 * its codec supports splitting ({@link SplittableCompressionCodec}).
 */
@Override
protected boolean isSplitable(JobContext context, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
return (codec == null) || (codec instanceof SplittableCompressionCodec);
}
}
org.apache.hadoop.mapreduce.lib.input.LineRecordReader继承了RecordReader类(记录读取器将数据分成键/值对,以便输入到Mapper)
类说明:将键视为文件中的偏移量,将值视为行
在LineRecordReader类中,Mapper通过调用nextKeyValue()方法将每行记录写到key和value中
initialize()方法,最后生成split行阅读器对象(SplitLineReader in)
SplitLineReader 继承 org.apache.hadoop.util.LineReader
LineReader类说明:从输入流中提供行读取的类,默认按照'\r'或'\n'进行分割
LineReader类里的readline()方法【从InputStream中读取一行到给定的文本】
in.readLine(Text str, int maxLineLength,int maxBytesToConsume):
LineRecordReader 类的initialize()方法源码:
// Opens the file backing this split and positions the reader at its start,
// picking a line-reader variant by compression type:
//   splittable codec   -> CompressedSplitLineReader over codec-adjusted bounds
//   non-splittable codec -> SplitLineReader over the whole decompressed stream
//   uncompressed       -> UncompressedSplitLineReader after seeking to `start`
// NOTE(review): this snippet appears truncated — the method's closing brace
// (and any trailing logic after the else-branch) is missing from these notes.
public void initialize(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
FileSplit split = (FileSplit) genericSplit;// the logical split to read
Configuration job = context.getConfiguration();
this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);// longest accepted line
start = split.getStart();// byte offset where this split begins
end = start + split.getLength();// byte offset just past the split's end
final Path file = split.getPath();// path of the underlying file
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
fileIn = fs.open(file);
CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
if (null!=codec) {
// compressed input
isCompressedInput = true;
decompressor = CodecPool.getDecompressor(codec);
if (codec instanceof SplittableCompressionCodec) {
final SplitCompressionInputStream cIn =
((SplittableCompressionCodec)codec).createInputStream(
fileIn, decompressor, start, end,
SplittableCompressionCodec.READ_MODE.BYBLOCK);
in = new CompressedSplitLineReader(cIn, job,
this.recordDelimiterBytes);
start = cIn.getAdjustedStart();// codec may shift split bounds to compression-block edges
end = cIn.getAdjustedEnd();
filePosition = cIn;
} else {
// non-splittable codec: read the entire decompressed stream with a new split line reader
in = new SplitLineReader(codec.createInputStream(fileIn,
decompressor), job, this.recordDelimiterBytes);
filePosition = fileIn;
}
} else {
fileIn.seek(start);// uncompressed: jump straight to the split start and read by line
in = new UncompressedSplitLineReader(// reader for plain (uncompressed) input
fileIn, job, this.recordDelimiterBytes, split.getLength());
filePosition = fileIn;
}
// Advances to the next record: key = byte offset (pos) of the line,
// value = the line's contents. Returns false — and nulls out key/value —
// once the split is exhausted.
public boolean nextKeyValue() throws IOException {
if (key == null) {
key = new LongWritable();
}
key.set(pos);// key is the byte offset of the upcoming line
if (value == null) {
value = new Text();
}
int newSize = 0;// bytes consumed by the last read; 0 means end of split
// We always read one extra line, which lies outside the upper
// split limit i.e. (end - 1)
while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
if (pos == 0) {
newSize = skipUtfByteOrderMark();// at file start, skip a UTF byte-order mark if present
} else {
newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
pos += newSize;
}
if ((newSize == 0) || (newSize < maxLineLength)) {
break;// end of split, or a complete line within the length limit
}
// line too long. try again
LOG.info("Skipped line of size " + newSize + " at pos " +
(pos - newSize));
}
if (newSize == 0) {
// nothing read: signal end of input by clearing key/value
key = null;
value = null;
return false;
} else {
return true;
}
}