Beginner-Level HDFS Code

HDFS is a distributed file system; I won't explain it at length here, I just want to paste in the beginner-level client code.
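All of the snippets below assume a Hadoop client dependency (e.g. hadoop-client) on the classpath, a NameNode reachable at hdfs://hadoop02:9000, and the HDFS user "node" used in the calls; JUnit 4 supplies the @Test annotation. They share roughly the following imports:

    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    import org.junit.Test;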

1. File upload

// File upload
public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), conf, "node");
    // Copy the local file up to HDFS
    fs.copyFromLocalFile(true, new Path("f:/zhangsan.txt"),
            new Path("hdfs://hadoop02:9000/usr/local/zhangsan-cp.txt"));
    fs.close();
    System.out.println("successful");
}
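The first argument of copyFromLocalFile is delSrc: passing true deletes the local f:/zhangsan.txt after the upload, so the call behaves like a move. Pass false to keep the local copy.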

2. File download

@Test
public void getFileFromHDFS() throws Exception {
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), configuration, "node");
    // Copy the HDFS file down to the local file system
    fs.copyToLocalFile(false, new Path("hdfs://hadoop02:9000/usr/local/zyq-cp.txt"),
            new Path("f:/zyqt.txt"), true);
    fs.close();
    System.out.println("successful");
}
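Here copyToLocalFile takes (delSrc, src, dst, useRawLocalFileSystem): the leading false leaves the HDFS source in place, and the trailing true bypasses the checksum layer so no .crc file is written next to the download, which is convenient when the local file system is Windows.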

3. Creating a directory

// Create a directory
@Test
public void mkdirAtHDFS() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), conf, "node");
    fs.mkdirs(new Path("hdfs://hadoop02:9000/usr/local/output01"));
    fs.close();
}
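mkdirs creates any missing parent directories along the way, much like mkdir -p, and returns a boolean indicating success.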

4. Deleting a file or directory

@Test
public void deleteAtHDFS() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), conf, "node");
    // Delete the directory and everything under it
    fs.delete(new Path("hdfs://hadoop02:9000/usr/local/output01"), true);
    fs.close();
    System.out.println("succeed");
}
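The second argument to delete enables recursion; it must be true to remove a non-empty directory, otherwise the call fails with an IOException. For a plain file the flag makes no difference.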

5. Renaming a file

@Test
public void renameAtHDFS() throws Exception {
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), configuration, "node");
    fs.rename(new Path("hdfs://hadoop02:9000/usr/local/output01/bajie.txt"),
            new Path("hdfs://hadoop02:9000/usr/local/output01/rbajie.txt"));
    fs.close();
    System.out.println("Successful");
}
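Note that rename reports failure (for example, a missing source or an existing destination) by returning false rather than throwing, so checking the return value is advisable.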

6. Viewing file details

// View file details
@Test
public void readListFiles() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), conf, "node");
    // Recursively list every file under the root directory
    RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);

    while (listFiles.hasNext()) {
        LocatedFileStatus fileStatus = listFiles.next();
        System.out.println(fileStatus.getPath().getName()); // file name
        System.out.println(fileStatus.getBlockSize());      // block size
        System.out.println(fileStatus.getPermission());     // permissions
        System.out.println(fileStatus.getLen());            // file length
        BlockLocation[] blockLocations = fileStatus.getBlockLocations();
        for (BlockLocation block : blockLocations) {
            System.out.println("block-offset:" + block.getOffset());
            for (String host : block.getHosts()) {
                System.out.println(host); // DataNode hosting this block
            }
        }
        System.out.println("---------------- end of this file, next ----------------");
    }
    fs.close();
    System.out.println("succeed");
}
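listFiles(new Path("/"), true) walks the tree recursively and yields only files, never directories. Because the iterator returns LocatedFileStatus objects, the block locations come back with each entry, so no extra round trip to the NameNode is needed to print them.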

7. Listing a directory

// List the entries directly under a directory
@Test
public void findAtHDFS() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop02:9000"), conf, "node");

    FileStatus[] listStatus = fs.listStatus(new Path("/"));
    for (FileStatus status : listStatus) {
        if (status.isFile()) {
            System.out.println("f--" + status.getPath().getName());
        } else {
            System.out.println("d--" + status.getPath().getName());
        }
    }
    fs.close();
    System.out.println("successful");
}
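Unlike listFiles above, listStatus is not recursive and returns directories as well as files in the given path, which is why the isFile() check is needed to tell the two apart.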

Reposted from blog.csdn.net/chunfenxiaotaohua/article/details/103270222