Java socket: a simulated client sends data, and the backend receives it and stores it in Hadoop

A simulated client that sends data to the backend:

package com.njbdqn;

import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;

public class MyClient extends Thread {
    @Override
    public void run() {
        int num = 5000;
        for (int i = 0; i <= num; i++) {
            // one short-lived connection per message; the server reads until the stream closes
            try (Socket socket = new Socket("localhost", 59999);
                 OutputStream os = socket.getOutputStream()) {
                String clientInfo = "I am client,Send Message:Are you OK!" + i;
                // the last message is the sentinel that tells the server to shut down
                if (i == num) {
                    clientInfo = "exit";
                }
                os.write(clientInfo.getBytes());
                //Thread.sleep(1000);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        new MyClient().start();
    }
}
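Each message travels over its own short-lived connection: the client opens a socket, writes one line, and closes it, so the server's readLine() sees end-of-stream after every message. The last message is the sentinel "exit", which the server below treats as its shutdown signal, so the server must already be running when the client starts.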

The backend receives the data and appends it to a file in HDFS:

package com.njbdqn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.URI;
import java.net.URISyntaxException;

public class MyReaderAndWrite {

    static FileSystem fs;

    static {
        try {
            Configuration conf = new Configuration();
            // work around the datanode error "Failed to replace a bad datanode on the
            // existing pipeline due to no more good datanodes being available to try",
            // which append() tends to trigger on small clusters; these settings must be
            // on the Configuration used to build the FileSystem, or they have no effect
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            fs = FileSystem.get(new URI("hdfs://192.168.56.100:9000"), conf, "root");
        } catch (IOException | URISyntaxException | InterruptedException e) {
            e.printStackTrace();
        }
    }

    public void write() throws IOException {
        Path path = new Path("/mydemo/e,.txt");
        try {
            // start the server
            ServerSocket sck = new ServerSocket(59999);
            // create the target file on first run, then open it for appending
            if (!fs.exists(path)) {
                System.out.println("create");
                fs.create(path).close();
            }
            FSDataOutputStream append = fs.append(path);

            boolean flag = false;
            while (!flag) {
                // wait for the next client connection
                Socket socket = sck.accept();
                // wrap the socket's byte stream in a character reader
                InputStream is = socket.getInputStream();
                BufferedReader br = new BufferedReader(new InputStreamReader(is));
                String info;
                while ((info = br.readLine()) != null) {
                    System.out.println(info);
                    if (info.equalsIgnoreCase("exit")) {
                        // sentinel message: stop accepting connections
                        flag = true;
                    } else {
                        // append the received line to the HDFS file (no newline is added)
                        InputStream in = new ByteArrayInputStream(info.getBytes());
                        IOUtils.copyBytes(in, append, 4096, false);
                    }
                }
                // release per-connection resources before accepting the next client
                br.close();
                is.close();
                socket.close();
            }
            append.close();
            sck.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws IOException {
        new MyReaderAndWrite().write();
    }
}
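To check that the messages actually landed in HDFS, a minimal read-back sketch could look like the following (the class name MyVerify is illustrative; the NameNode URI, user, and file path are the same ones used above):

package com.njbdqn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.InputStream;
import java.net.URI;

public class MyVerify {
    public static void main(String[] args) throws Exception {
        // same NameNode URI and user as the writer above
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.56.100:9000"),
                new Configuration(), "root");
        // dump the appended file to stdout
        InputStream in = fs.open(new Path("/mydemo/e,.txt"));
        IOUtils.copyBytes(in, System.out, 4096, false);
        in.close();
        fs.close();
    }
}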
