How Spark Reads SFTP Files via the Hadoop SFTP FileSystem

Gradle Dependencies

        implementation('org.apache.spark:spark-sql_2.13:3.5.3') {
            exclude group: "org.apache.logging.log4j", module: "log4j-slf4j2-impl"
        }
        implementation('org.apache.hadoop:hadoop-common:3.3.4') {
            exclude group: "org.slf4j", module: "slf4j-reload4j"
        }

        testImplementation "org.springframework.boot:spring-boot-starter-test"
        testImplementation "org.apache.sshd:sshd-core:2.15.0"
        testImplementation "org.apache.sshd:sshd-sftp:2.15.0"

Set Up a Fake SFTP Server

        // GIVEN
        // Set up a fake SFTP server
        String host = "127.0.0.1";
        String user = "username";
        String passwd = "password";
        int port = 9188;
        // using a password:
        //String sftpURL = String.format("sftp://%s:%s@%s:%d", user, passwd, host, port);
        // using a key pair:
        String identityFile = System.getProperty("user.home").concat(File.separator).concat(".ssh").concat(File.separator).concat("id_rsa");
        String sftpURL = String.format("sftp://%s@%s:%d", user, host, port);

        // back the virtual file system with a local temp directory
        // (rootPath is assumed here; declare it wherever fits your test class)
        java.nio.file.Path rootPath = Files.createTempDirectory("sftp-root");

        SshServer sshd = SshServer.setUpDefaultServer();
        sshd.setPort(port);
        sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider());

        // authenticate against the local public key (~/.ssh/id_rsa.pub)
        sshd.setPublickeyAuthenticator(new AuthorizedKeysAuthenticator(Paths.get(identityFile.concat(".pub"))));
        //sshd.setPasswordAuthenticator((username, password, session) -> user.equals(username) && passwd.equals(password));

        sshd.setSubsystemFactories(Collections.singletonList(new SftpSubsystemFactory()));
        sshd.setFileSystemFactory(new VirtualFileSystemFactory(rootPath));

        sshd.start();
        System.out.println("Fake SFTP server started at port " + port);

Generate Test CSV Files via the Hadoop SFTP FileSystem API

        // WHEN
        // Create CSV files via the Hadoop FileSystem API
        Path testedRootPath = new Path("test-path");

        Configuration conf = new Configuration();
        conf.set("fs.sftp.impl", "org.apache.hadoop.fs.sftp.SFTPFileSystem");
        // points at a PEM-format copy of the private key
        // (e.g. convert a copy with: ssh-keygen -p -m PEM -f ~/.ssh/id_rsa.pem)
        conf.set("fs.sftp.keyfile", identityFile.concat(".pem"));
        conf.set("fs.defaultFS", sftpURL);

        // get a FileSystem instance for the root path
        Path path = new Path("/");
        FileSystem sftpFileSystem = FileSystem.get(path.toUri(), conf);
        Assertions.assertTrue(sftpFileSystem instanceof SFTPFileSystem);

        // Create a root folder and 2 test CSV files with text content
        sftpFileSystem.mkdirs(testedRootPath);
        try (BufferedWriter br = new BufferedWriter(new OutputStreamWriter(sftpFileSystem.create(new Path(testedRootPath, "test1.csv"), true)))) {
            br.write("A|B|C|D");
            br.newLine();
            br.write("1|2|3|4");
        }
        try (BufferedWriter br = new BufferedWriter(new OutputStreamWriter(sftpFileSystem.create(new Path(testedRootPath, "test2.csv"), true)))) {
            br.write("A|B|C|D");
            br.newLine();
            br.write("5|6|7|8");
        }


        // verify the test files were created
        RemoteIterator<LocatedFileStatus> remoteIterator = sftpFileSystem.listFiles(new Path("/"), true);
        List<LocatedFileStatus> statuses = new ArrayList<>();
        while (remoteIterator.hasNext()) {
            statuses.add(remoteIterator.next());
        }
        Assertions.assertEquals(2, statuses.size());
        Assertions.assertTrue(statuses.get(0).isFile());
        Assertions.assertTrue(statuses.get(1).isFile());
        Assertions.assertEquals("test1.csv", statuses.get(0).getPath().getName());
        Assertions.assertEquals("test2.csv", statuses.get(1).getPath().getName());

Finally, Read the Test Data from the SFTP Server


        // THEN
        // Read the test CSV files with Spark
        SparkConf sparkConf = new SparkConf()
                .setAppName("spark-test")
                .setMaster("local[2]")
                .set("spark.ui.enabled", "false")
                .set("spark.hadoop.fs.sftp.impl", "org.apache.hadoop.fs.sftp.SFTPFileSystem")
                .set("spark.hadoop.fs.sftp.keyfile", identityFile.concat(".pem"))
                //.set("spark.hadoop.fs.defaultFS", sftpURL)
                ;
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();

        // read the CSV files over the SFTP connection
        Dataset<Row> dataset = sparkSession.read()
                .option("header", "true").option("delimiter", "|")
                .option("recursiveFileLookup", "true")
                .csv(sftpURL.concat("/"));
        dataset.printSchema();
        dataset.show();
        root
         |-- A: string (nullable = true)
         |-- B: string (nullable = true)
         |-- C: string (nullable = true)
         |-- D: string (nullable = true)

        +---+---+---+---+
        |  A|  B|  C|  D|
        +---+---+---+---+
        |  1|  2|  3|  4|
        |  5|  6|  7|  8|
        +---+---+---+---+
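
To make the test self-verifying, it can assert on the DataFrame and release resources at the end. A sketch under the same assumptions as above:

        // two CSV files with one data row each => two rows once the headers are consumed
        Assertions.assertEquals(2, dataset.count());
        Assertions.assertArrayEquals(new String[]{"A", "B", "C", "D"}, dataset.columns());

        // release Spark and the fake SFTP server
        sparkSession.stop();
        sshd.stop();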
