/**
 * Generate the list of input files and carve each one into {@code FileSplit}s.
 *
 * @param job the job context supplying configuration and input paths
 * @return one InputSplit per logical chunk of each input file
 * @throws IOException if listing the input paths or querying block locations fails
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
  StopWatch stopwatch = new StopWatch().start();

  // Lower/upper bounds on the split size. With default configuration the
  // minimum resolves to 1 and the maximum to Long.MAX_VALUE, so in practice
  // the file's block size ends up driving the split size.
  long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
  long maxSize = getMaxSplitSize(job);

  List<InputSplit> splits = new ArrayList<InputSplit>();
  List<FileStatus> files = listStatus(job);

  // Directories may appear in a non-recursive listing; they are skipped only
  // when the ignore-subdirs flag is explicitly set (defaults to false).
  boolean ignoreDirs = !getInputDirRecursive(job)
      && job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);

  for (FileStatus file : files) {
    if (ignoreDirs && file.isDirectory()) {
      continue;
    }

    Path path = file.getPath();
    long length = file.getLen();

    if (length == 0) {
      // Zero-length files still yield one (empty) split with no host hints.
      splits.add(makeSplit(path, 0, length, new String[0]));
      continue;
    }

    // Resolve the file's block locations, reusing them when the listing
    // already carried location information.
    BlockLocation[] blockLocations;
    if (file instanceof LocatedFileStatus) {
      blockLocations = ((LocatedFileStatus) file).getBlockLocations();
    } else {
      FileSystem fs = path.getFileSystem(job.getConfiguration());
      blockLocations = fs.getFileBlockLocations(file, 0, length);
    }

    if (!isSplitable(job, path)) {
      // Unsplittable files (e.g. some compressed formats) become a single
      // split spanning the entire file.
      if (LOG.isDebugEnabled()) {
        // Log only if the file is big enough to be splitted
        if (length > Math.min(file.getBlockSize(), minSize)) {
          LOG.debug("File is not splittable so no parallelization "
              + "is possible: " + file.getPath());
        }
      }
      splits.add(makeSplit(path, 0, length, blockLocations[0].getHosts(),
          blockLocations[0].getCachedHosts()));
      continue;
    }

    // Split size = max(minSize, min(maxSize, blockSize)); with defaults this
    // equals the block size, so one split per block.
    long splitSize = computeSplitSize(file.getBlockSize(), minSize, maxSize);

    // Carve full-sized splits while the remaining bytes exceed
    // SPLIT_SLOP (1.1) times the split size; whatever is left (at most
    // 1.1 * splitSize bytes) becomes the final split, which avoids
    // producing a tiny trailing split.
    long offset = 0;
    while (((double) (length - offset)) / splitSize > SPLIT_SLOP) {
      int blkIndex = getBlockIndex(blockLocations, offset);
      splits.add(makeSplit(path, offset, splitSize,
          blockLocations[blkIndex].getHosts(),
          blockLocations[blkIndex].getCachedHosts()));
      offset += splitSize;
    }
    if (length - offset != 0) {
      int blkIndex = getBlockIndex(blockLocations, offset);
      splits.add(makeSplit(path, offset, length - offset,
          blockLocations[blkIndex].getHosts(),
          blockLocations[blkIndex].getCachedHosts()));
    }
  }

  // Save the number of input files for metrics/loadgen
  job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
  stopwatch.stop();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Total # of splits generated by getSplits: " + splits.size()
        + ", TimeTaken: " + stopwatch.now(TimeUnit.MILLISECONDS));
  }
  return splits;
}
// Annotated walkthrough of Hadoop FileInputFormat#getSplits.
// Adapted from: blog.csdn.net/qq_31776219/article/details/112599148