Principles and Applications of Big Data Technology, Lab 2: HDFS File System Operations and Programming. Java-Based HDFS File Operations (Extended Experiment 2)

(1) Write a class MyFSDataInputStream that extends org.apache.hadoop.fs.FSDataInputStream, with the following requirement: implement a method readLine() that reads a specified HDFS file line by line, returning null when the end of the file is reached and one line of text otherwise.
Remember to start Hadoop first (for example, by running sbin/start-dfs.sh in the Hadoop installation directory).
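If you want to confirm that HDFS is actually reachable before running the exercise, a minimal connectivity check is sketched below. The class name HdfsPing is hypothetical, and the NameNode address hdfs://localhost:9000 is assumed to match the pseudo-distributed setup used in this lab; adjust both to your environment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, not part of the exercise: fails fast if HDFS is down.
public class HdfsPing {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed NameNode address
        try (FileSystem fs = FileSystem.get(conf)) {
            // exists() forces an RPC to the NameNode, so it throws if HDFS is not up
            System.out.println("HDFS reachable, / exists: " + fs.exists(new Path("/")));
        }
    }
}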
The code for the exercise is as follows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.*;

public class MyFSDataInputStream extends FSDataInputStream {
    public MyFSDataInputStream(InputStream in) {
        super(in);
    }

    /**
     * Read one line: consume one character at a time until '\n' or EOF,
     * then return the accumulated line (including the trailing '\n').
     */
    public static String readline(BufferedReader br) throws IOException {
        char[] data = new char[1024];
        int off = 0; // br resumes where the previous call stopped, so off restarts at 0 on every call
        // Stop at EOF, at a newline, or when the buffer is full (guards against very long lines)
        while (off < data.length && br.read(data, off, 1) != -1) {
            if (data[off] == '\n') {
                off += 1;
                break;
            }
            off += 1;
        }
        if (off > 0) {
            return String.valueOf(data, 0, off); // only the characters actually read, not the whole buffer
        } else {
            return null; // EOF: nothing was read
        }
    }

    /**
     * Print the contents of an HDFS file line by line.
     */
    public static void cat(Configuration conf, String remoteFilePath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        FSDataInputStream in = fs.open(remotePath);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String line = null;
        while ((line = MyFSDataInputStream.readline(br)) != null) {
            System.out.print(line); // the returned line already ends with '\n'
        }
        br.close();
        in.close();
        fs.close();
    }

    /**
     * Main entry point.
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // note: no stray space in the key; "fs.default.name" is the deprecated form
        String remoteFilePath = "/user/hadoop/example.txt"; // HDFS path
        try {
            MyFSDataInputStream.cat(conf, remoteFilePath);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
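Two remarks on the listing. First, the exercise says "readLine()", but java.io.DataInputStream.readLine(), which FSDataInputStream inherits, is declared final, so it cannot be overridden in a subclass; exposing a static helper such as readline(BufferedReader) is a common workaround. Second, cat() opens a plain FSDataInputStream and never actually instantiates MyFSDataInputStream. The sketch below (hypothetical class name MyFSDataInputStreamDemo, same assumed NameNode address and example.txt path as above) shows how the wrapper itself could be used; this works because fs.open() returns a stream that is Seekable and PositionedReadable, which is exactly what the FSDataInputStream constructor requires of the stream it wraps.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.BufferedReader;
import java.io.InputStreamReader;

// Hypothetical demo, not part of the exercise: exercises the MyFSDataInputStream constructor.
public class MyFSDataInputStreamDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed NameNode address
        FileSystem fs = FileSystem.get(conf);
        // fs.open() returns a Seekable, PositionedReadable stream, so it can be wrapped
        MyFSDataInputStream in = new MyFSDataInputStream(fs.open(new Path("/user/hadoop/example.txt")));
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String line;
        while ((line = MyFSDataInputStream.readline(br)) != null) {
            System.out.print(line); // readline() keeps the trailing '\n', so print, not println
        }
        br.close();
        fs.close();
    }
}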




Run result:
[Figure 1: screenshot of the program's console output]
