// HDFS file merge and upload to the server (HDFS 文件合并及上传至服务器)

/*

 *   合并多个小文件,并传至服务器

  为什么要合并?

      从存储角度:小文件占用太多NameNode元数据信息资源,NN资源利用率不高效;合并后占用NN的内存小了,NN就有更多的内存去管理更大规模的集群

      从计算资源的角度:一个小文件占用一个block,一个block被一个map处理,计算资源消耗太多。合并后多个小文件占用一个block,占用的map资源少,减少了计算资源的消耗。而且,从用户角度来看, 耗费的时间,合并后处理文件耗费的时间与多map处理多个小文件耗费的时间相差不大,且节省了资源。

 */

package com.hadoop.hdfs;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.IOUtils;

public class combineFilesCopyToHDFS {

    public static class RegexAcceptFilter implements PathFilter{

        private static int select;

        private static String regexString;

        public RegexAcceptFilter(String regex,int mod){

            this.regexString=regex;

            this.select=mod;

        }

        @Override

        public boolean accept(Path path) {

            if (select==0) {

                boolean b = path.toString().matches(regexString);

                return (!b);

            }else{

                boolean matches = path.toString().matches(regexString);

                return matches;

            }

        }

    }

    public static void combineUp(String srcPathString,String dstPathString) throws IOException, URISyntaxException{原理:打开输入输出文件流,通过copyBytes()实现输入输出流的传输。

        Configuration configuration = new Configuration();

        FileSystem fileSystem = FileSystem.get(new URI("hdfs://dajiangtai:9000/"), configuration);

        LocalFileSystem localFileSystem = FileSystem.getLocal(configuration);

        if(!fileSystem.exists(new Path(dstPathString))){//检查目标路径是否存在并创建

            fileSystem.mkdirs(new Path(dstPathString));

        }

        FileStatus[] globStatus = localFileSystem.globStatus(new Path(srcPathString), new RegexAcceptFilter("^.*svn$", 0));

        Path[] stat2Paths = FileUtil.stat2Paths(globStatus);

        for (Path dirPath : stat2Paths) {

            String filename = dirPath.getName();//获得路径文件夹名

            String replaceString = filename.replace("-", "");

            Path blockPath = new Path(dstPathString+replaceString+".txt");

            FileStatus[] childFileStatus = localFileSystem.globStatus(new Path(srcPathString+filename+"/*"), new RegexAcceptFilter("^.*txt$", 1));

            Path[] childpaths = FileUtil.stat2Paths(childFileStatus);//显示从根目录到目标目录的路径

            FSDataOutputStream fsDataOutputStream;

            FSDataInputStream fsDataInputStream;

            //输入流

            if(!fileSystem.exists(blockPath)){

                fsDataOutputStream = fileSystem.create(blockPath);//目标文件不存在则创建

            }else {

                fsDataOutputStream=fileSystem.append(blockPath);//存在则追加

            }

            for (Path path : childpaths) {//子目录内的待被读取文件

                fsDataInputStream = localFileSystem.open(path);//输出流

                IOUtils.copyBytes(fsDataInputStream, fsDataOutputStream, 4096, false);

                fsDataInputStream.close();//强调   关闭

            }

            fsDataOutputStream.close();//强调   关闭

        }

        fileSystem.close();//强调   关闭

    }

    public static void main(String[] args) throws IOException, URISyntaxException {

        String []arg0={

            "D:/大数据文件/ppt/data/73/*",

            "hdfs://dajiangtai:9000/middle/tv/"

        };

        combineUp(arg0[0], arg0[1]);

    }

}

// 猜你喜欢 (you might also like)
// Reprinted from: blog.csdn.net/lrf2454224026/article/details/82049021