Here is a Java program that reads a CSV file from AWS S3, processes it with Spark SQL, saves the result to a MySQL database, and also writes it back to S3 as a CSV file:
package org.example.JavaDemo;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.example.util.DateUtil;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import java.util.HashMap;
import java.util.Properties;
import io.netty.buffer.AbstractByteBufAllocator;
public class SparkSqlCsvToCsv {
public static void main(String[] args) {
/**
* China-region S3 endpoint: s3.cn-north-1.amazonaws.com.cn
* Ningxia region: cn-northwest-1, Beijing region: cn-north-1
*/
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("=========Start process=========Data:"+DateUtil.getCurrentTime());
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
String hdfsInAddress = "s3a://emr-demo-input/mydata/";// alternatives: "hdfs://192.168.209.129:9000/" (HDFS server), a local path such as D:\DevTemp\AWS\, or s3://emr-demo-input/mydata/
String inputAddress = "";//"in/";
String csvFileName="emr-demo-data-2.csv";
SparkConf conf = new SparkConf().setMaster("local").setAppName("TestSpark");
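// Note (not in the original code): setMaster("local") pins the job to local mode even when it runs
// on the cluster; when submitting to YARN on EMR the master is usually left unset here and
// supplied by spark-submit instead.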
/*
* Properties properties = new Properties(); InputStream inputStream =
* Object.class.getResourceAsStream("/s3.properties");
* properties.load(inputStream);
*/
JavaSparkContext sc = new JavaSparkContext(conf);// JavaSparkContext is the older entry point (SparkSession is the newer one)
//SparkContext sc = new SparkContext(conf);
/*
* sc.hadoopConfiguration().set("fs.s3a.access.key",properties.getProperty(
* "fs.s3a.access.key"));
* sc.hadoopConfiguration().set("fs.s3a.secret.key",properties.getProperty(
* "fs.s3a.secret.key"));
* sc.hadoopConfiguration().set("fs.s3a.endpoint",properties.getProperty(
* "fs.s3a.endpoint"));//spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem
*/
/* spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem
spark.hadoop.fs.s3a.access.key=ACCESSKEY
spark.hadoop.fs.s3a.secret.key=SECRETKEY
*/
//sc.hadoopConfiguration().set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
sc.hadoopConfiguration().set("fs.s3a.access.key","AK***********MH");
sc.hadoopConfiguration().set("fs.s3a.secret.key","VR1spXe+Jb5p**gK**Lb/zM4SI**2tmGmbr");
sc.hadoopConfiguration().set("fs.s3a.endpoint","s3.cn-northwest-1.amazonaws.com.cn");
SQLContext sqlContext = new SQLContext(sc);
HashMap<String, String> options = new HashMap<String, String>();
options.put("header", "true");// treat the first row as the header
options.put("inferSchema", "true");// infer column types automatically
//options.put("path", hdfsInAddress + inputAddress + filePath);
options.put("path", hdfsInAddress + inputAddress + csvFileName);
options.put("dateFormat", "yyyy-MM-dd");// note: lower-case y and d; "YYYY-MM-DD" is not a valid pattern
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("打印上传文件在hdfs的路径:"+hdfsInAddress + inputAddress + csvFileName);
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
/**** declare the field schema ****/
StructField structFields[] = new StructField[9];
structFields[0] = DataTypes.createStructField("Tier", DataTypes.StringType,true);
structFields[1] = DataTypes.createStructField("SellerCode",DataTypes.StringType,true);
structFields[2] = DataTypes.createStructField("SellerName",DataTypes.StringType,true);
structFields[3] = DataTypes.createStructField("DataSource",DataTypes.StringType,true);
structFields[4] = DataTypes.createStructField("SellerProvince",DataTypes.StringType,true);
structFields[5] = DataTypes.createStructField("_201901",DataTypes.DoubleType,true);
structFields[6] = DataTypes.createStructField("_201902",DataTypes.DoubleType,true);
structFields[7] = DataTypes.createStructField("_201903",DataTypes.DoubleType,true);
structFields[8] = DataTypes.createStructField("flag",DataTypes.StringType,true);
StructType structType = new StructType(structFields);
Dataset<Row> dataFrame = sqlContext.load("com.databricks.spark.csv", structType, options);
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("===================read csv finish:"+DateUtil.getCurrentTime());
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
// DataFrame cars = (new CsvParser()).withUseHeader(true).csvFile(sqlContext, "cars.csv");// alternative: read the CSV through CsvParser
dataFrame.registerTempTable("result");
StringBuffer sparkSql = new StringBuffer("select ");
sparkSql.append("Tier");
sparkSql.append(", SellerCode");
sparkSql.append(", SellerName");
sparkSql.append(", DataSource");
sparkSql.append(", SellerProvince");
sparkSql.append(", _201901");
sparkSql.append(", _201902");
sparkSql.append(", _201903");
sparkSql.append(", if(_201903>_201902,'up','down') as flag");
sparkSql.append(" from result");
Dataset<Row> resultFrame = sqlContext.sql(sparkSql.toString());
//resultFrame.createOrReplaceTempView("resultView");// create a temp view
//System.out.println("***************print peopleScore via Dataset********"+resultFrame.limit(10).showString(20,0,false));
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("******print schema *******");
resultFrame.printSchema();
//resultFrame.select("SellerName").show();
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
//Tier SellerCode SellerName DataSource SellerProvince _201901 _201902 _201903
Dataset<Row> df = resultFrame.select(
resultFrame.col("Tier"),
resultFrame.col("SellerCode"),
resultFrame.col("SellerName"),
resultFrame.col("DataSource"),
resultFrame.col("SellerProvince"),
resultFrame.col("_201901"),
resultFrame.col("_201902"),
resultFrame.col("_201903"),
resultFrame.col("flag")
);
df = df.filter(df.col("Tier").contains("T"));//where condition:equalTo/
//df = df.filter((df.col("_201902").cast(DataTypes.FloatType)).gt((df.col("201901").cast(DataTypes.FloatType))));//gt 大于
//df = df.orderBy(df.col("_201902").cast(DataTypes.FloatType).asc_nulls_first());//转换类型并升序
//df.groupBy("age").count();//分组
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("******df.show() print schema *******");
df.show();
System.out.println("===================exe csv finish:"+DateUtil.getCurrentTime());
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
/************* write the result to the MySQL database ******************/
// database connection
String url = "jdbc:mysql://mydbinstance.cpj**********k.rds.cn-northwest-1.amazonaws.com.cn:3306/hive?useUnicode=true&characterEncoding=utf-8";
//String url = jdbc:mysql://127.0.0.1:3306/hive?useUnicode=true&characterEncoding=utf-8
Properties connectionProperties = new Properties();
connectionProperties.put("user","username");//root
connectionProperties.put("password","12345678");//123456
connectionProperties.put("driver","com.mysql.jdbc.Driver");
/** insert into the database table **/
df.write().mode(SaveMode.Overwrite).jdbc(url,"t_result",connectionProperties);// Overwrite replaces both the data and the table schema
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("===================write db finish:"+DateUtil.getCurrentTime());
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
/**** write the output to S3 ****/
df.write().option("header","true").option("delimiter",",").csv("s3a://emr-demo-output/mydata/result");
sc.stop();
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
System.out.println("****************write csv end Data:"+DateUtil.getCurrentTime());
System.out.println("EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR#EMR");
}
}
Next comes execution.
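For reference, a minimal sketch of how the jar might be built, uploaded, and attached as a step from the AWS CLI (none of these commands appear in the original post; the cluster ID is a placeholder and the bucket/jar paths reuse the anonymized names from the spark-submit command further below):

mvn clean package
aws s3 cp target/test.jar s3://mybucket/emr/test.jar
aws emr add-steps --cluster-id j-XXXXXXXXXXXXX --steps Type=CUSTOM_JAR,Name=SparkSqlCsvToCsv,ActionOnFailure=CONTINUE,MainClass=org.example.JavaDemo.SparkSqlCsvToCsv,Jar=s3://mybucket/emr/test.jar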
Adding the step manually on the EMR cluster and running the jar as a custom JAR step fails with the following exception:
Exception in thread "main" java.lang.NoSuchMethodError: io.netty.buffer.PooledByteBufAllocator.defaultNumHeapArena()I
at org.apache.spark.network.util.NettyUtils.createPooledByteBufAllocator(NettyUtils.java:113)
at org.apache.spark.network.client.TransportClientFactory.<init>(...)
at org.apache.spark.network.TransportContext.createClientFactory(TransportContext.java:99)
at org.apache.spark.rpc.netty.NettyRpcEnv.<init>(...)
at org.apache.spark.rpc.netty.NettyRpcEnvFactory.create(NettyRpcEnv.scala:461)
at org.apache.spark.rpc.RpcEnv$.create(RpcEnv.scala:57)
at org.apache.spark.SparkEnv$.create(SparkEnv.scala:249)
at org.apache.spark.SparkEnv$.createDriverEnv(SparkEnv.scala:175)
at org.apache.spark.SparkContext.createSparkEnv(SparkContext.scala:257)
at org.apache.spark.SparkContext.<init>(...)
at org.apache.spark.api.java.JavaSparkContext.<init>(...)
at org.example.JavaDemo.SparkSqlCsvToCsv.main(SparkSqlCsvToCsv.java:40)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:239)
at org.apache.hadoop.util.RunJar.main(RunJar.java:153)
This is a jar conflict in the environment: the Netty classes on the classpath do not match what Spark expects. I tried to resolve it through Maven dependency management (e.g. pinning the Netty version and marking the Spark dependencies as provided so the cluster's own jars are used), but it had no effect, so I gave up on that approach.
Instead, I used PuTTY to connect to the EMR master node as ec2-user and ran the following command there:
spark-submit --class com.company.acme.MyClass s3://mybucket/emr/test.jar
The job ran successfully, but it logged an exception:
java.io.IOException: File '/var/aws/emr/userData.json' cannot be read
at com.amazon.ws.emr.hadoop.fs.shaded.org.apache.commons.io.FileUtils.openInputStream(FileUtils.java:296)
at com.amazon.ws.emr.hadoop.fs.shaded.org.apache.commons.io.FileUtils.readFileToString(FileUtils.java:1711)
at com.amazon.ws.emr.hadoop.fs.shaded.org.apache.commons.io.FileUtils.readFileToString(FileUtils.java:1748)
at com.amazon.ws.emr.hadoop.fs.util.UserData.getUserData(UserData.java:62)
at com.amazon.ws.emr.hadoop.fs.util.UserData.<init>(...)
at com.amazon.ws.emr.hadoop.fs.util.UserData.ofDefaultResourceLocations(UserData.java:52)
at com.amazon.ws.emr.hadoop.fs.util.AWSSessionCredentialsProviderFactory.buildSTSClient(AWSSessionCredentialsProviderFactory.java:52)
at com.amazon.ws.emr.hadoop.fs.util.AWSSessionCredentialsProviderFactory.<clinit>(...)
at com.amazon.ws.emr.hadoop.fs.rolemapping.DefaultS3CredentialsResolver.resolve(DefaultS3CredentialsResolver.java:22)
at com.amazon.ws.emr.hadoop.fs.guice.CredentialsProviderOverrider.override(CredentialsProviderOverrider.java:25)
at com.amazon.ws.emr.hadoop.fs.s3.lite.executor.GlobalS3Executor.executeOverriders(GlobalS3Executor.java:171)
at com.amazon.ws.emr.hadoop.fs.s3.lite.executor.GlobalS3Executor.execute(GlobalS3Executor.java:103)
at com.amazon.ws.emr.hadoop.fs.s3.lite.AmazonS3LiteClient.invoke(AmazonS3LiteClient.java:189)
at com.amazon.ws.emr.hadoop.fs.s3.lite.AmazonS3LiteClient.invoke(AmazonS3LiteClient.java:184)
at com.amazon.ws.emr.hadoop.fs.s3.lite.AmazonS3LiteClient.getObjectMetadata(AmazonS3LiteClient.java:96)
at com.amazon.ws.emr.hadoop.fs.s3.lite.AbstractAmazonS3Lite.getObjectMetadata(AbstractAmazonS3Lite.java:43)
at com.amazon.ws.emr.hadoop.fs.s3n.Jets3tNativeFileSystemStore.retrieveMetadata(Jets3tNativeFileSystemStore.java:220)
at com.amazon.ws.emr.hadoop.fs.s3n.S3NativeFileSystem.getFileStatus(S3NativeFileSystem.java:860)
at org.apache.hadoop.fs.FileSystem.isFile(FileSystem.java:1466)
at com.amazon.ws.emr.hadoop.fs.EmrFileSystem.isFile(EmrFileSystem.java:362)
at org.apache.spark.util.Utils$.fetchHcfsFile(Utils.scala:747)
at org.apache.spark.util.Utils$.doFetchFile(Utils.scala:723)
at org.apache.spark.deploy.DependencyUtils$.downloadFile(DependencyUtils.scala:137)
at org.apache.spark.deploy.SparkSubmit$$anonfun$prepareSubmitEnvironment$7.apply(SparkSubmit.scala:356)
at org.apache.spark.deploy.SparkSubmit$$anonfun$prepareSubmitEnvironment$7.apply(SparkSubmit.scala:356)
at scala.Option.map(Option.scala:146)
at org.apache.spark.deploy.SparkSubmit.prepareSubmitEnvironment(SparkSubmit.scala:355)
at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:782)
at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:928)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:937)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
This is a permissions issue; granting read access to the file fixes it:
sudo chmod 444 /var/aws/emr/userData.json
Then run the submit command again:
spark-submit --class com.company.acme.MyClass s3://mybucket/emr/test.jar
This time it completes without the error.
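To double-check the result, the S3 output prefix and the MySQL table can be inspected, for example (a sketch; the endpoint, user, and table names are the placeholders from the code above):

aws s3 ls s3://emr-demo-output/mydata/result/
mysql -h mydbinstance.cpj**********k.rds.cn-northwest-1.amazonaws.com.cn -u username -p -e "select count(*) from hive.t_result"

The S3 listing should show a _SUCCESS marker plus one part-* file per output partition.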