Accessing Files on HDFS with the Java API


1. Revisiting the configuration file core-site.xml

To read and write files on HDFS from a Java client, the first thing worth discussing is the configuration file hadoop-0.20.2/conf/core-site.xml. This is where I got burned at the start: with it misconfigured I simply could not connect to HDFS, and files could be neither created nor read.

<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/zhangzk/hadoop</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://linux-zzk-113:9000</value>
  </property>
</configuration>
The hadoop.tmp.dir property indicates the directory where the NameNode stores its metadata; on a DataNode, it is the directory where that node stores file data.

The fs.default.name property gives the NameNode's address (hostname or IP) and port; its default value is file:///. For the Java API, this configured URL is the one you must use to connect to HDFS; DataNodes likewise use this URL to reach the NameNode.
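As a quick sanity check that the client really resolves fs.default.name, here is a minimal sketch (my own illustration, not from the original post; it assumes the NameNode URL above and the Hadoop 0.20 jars on the classpath):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsConnectionCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Picked up automatically if core-site.xml is on the classpath;
        // set it explicitly here in case it is not.
        conf.set("fs.default.name", "hdfs://linux-zzk-113:9000");
        // FileSystem.get resolves the URI's scheme and authority against the configuration.
        FileSystem fs = FileSystem.get(URI.create("hdfs://linux-zzk-113:9000"), conf);
        System.out.println("connected to " + fs.getUri());
        fs.close();
    }
}

If this fails to connect, fix core-site.xml (and the address it points at) before debugging anything else.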

2. Accessing HDFS files and directories with the Java API


package com.demo.hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

/**
 * @author zhangzk
 */
public class FileCopyToHdfs {

    public static void main(String[] args) throws Exception {
        try {
            //uploadToHdfs();
            //deleteFromHdfs();
            //getDirectoryFromHdfs();
            appendToHdfs();
            readFromHdfs();
            System.out.println("SUCCESS"); // only report success if nothing threw
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Upload a local file to HDFS. */
    private static void uploadToHdfs() throws FileNotFoundException, IOException {
        String localSrc = "d:/qq.txt";
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        OutputStream out = fs.create(new Path(dst), new Progressable() {
            public void progress() {
                System.out.print("."); // print a dot as upload progresses
            }
        });
        IOUtils.copyBytes(in, out, 4096, true);
    }

    /** Read a file from HDFS and copy it to the local file system. */
    private static void readFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataInputStream hdfsInStream = fs.open(new Path(dst));

        OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
        byte[] ioBuffer = new byte[1024];
        int readLen = hdfsInStream.read(ioBuffer);
        while (-1 != readLen) {
            out.write(ioBuffer, 0, readLen);
            readLen = hdfsInStream.read(ioBuffer);
        }
        out.close();
        hdfsInStream.close();
        fs.close();
    }

    /**
     * Append content to the end of a file on HDFS.
     * Note: appending requires dfs.support.append to be set to true in hdfs-site.xml.
     */
    private static void appendToHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataOutputStream out = fs.append(new Path(dst));

        byte[] content = "zhangzk add by hdfs java api".getBytes();
        out.write(content, 0, content.length); // write once (the original loop here never terminated)
        out.close();
        fs.close();
    }

    /** Delete a file from HDFS. */
    private static void deleteFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        fs.deleteOnExit(new Path(dst)); // the path is deleted when the FileSystem is closed
        fs.close();
    }

    /** List the files and directories under an HDFS path. */
    private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FileStatus[] fileList = fs.listStatus(new Path(dst));
        for (int i = 0; i < fileList.length; i++) {
            System.out.println("name: " + fileList[i].getPath().getName()
                    + "\t\tsize: " + fileList[i].getLen());
        }
        fs.close();
    }
}

Note: the append operation is no longer supported starting with hadoop-0.21; for more on append, see this JavaEye post:

http://dongyajun.javaeye.com/blog/643391
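If your Hadoop version does not support append, one common workaround is to rewrite the file: copy the existing contents plus the new bytes into a temporary file, then replace the original with it. A minimal sketch of that idea (my own addition, not from the original post; the .tmp suffix is illustrative):

import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class AppendWorkaround {

    /** "Append" by copying old contents plus new bytes into a temp file, then renaming. */
    public static void appendByRewrite(String dst, byte[] extra) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        Path src = new Path(dst);
        Path tmp = new Path(dst + ".tmp"); // hypothetical temp name

        InputStream in = fs.open(src);
        OutputStream out = fs.create(tmp);
        IOUtils.copyBytes(in, out, 4096, false); // copy existing contents, keep out open
        in.close();
        out.write(extra);                        // then the new bytes
        out.close();

        fs.delete(src, false);                   // replace the original with the rewritten file
        fs.rename(tmp, src);
        fs.close();
    }
}

Note that this is not atomic and rewrites the whole file, so it only makes sense for small files; it simply avoids depending on fs.append() on clusters where append is disabled.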


This article comes from a CSDN blog; please credit the source when reposting: http://blog.csdn.net/zhangzhaokun/archive/2010/05/16/5597433.aspx
