# Convert between Spark DataFrames and pandas DataFrames, and save a Spark DataFrame into Hive.

import pandas as pd
from pyspark.sql import SparkSession

# enableHiveSupport() is required for spark.sql("create table ...") to reach
# the Hive metastore; without it the table lands in a local/in-memory catalog.
spark = (
    SparkSession.builder
    .appName("Dataframe")
    .enableHiveSupport()
    .getOrCreate()
)

data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])

# pandas.DataFrame -> spark DataFrame: createDataFrame accepts a pandas frame
# directly, so there is no need to split .values / .columns by hand.
spark_df = spark.createDataFrame(data)
spark_df.show()  # show() prints the frame itself and returns None — don't wrap it in print()

# spark DataFrame -> pandas.DataFrame (collects all rows to the driver)
pandas_df = spark_df.toPandas()
print('pandas.DataFrame=', pandas_df)

# Persist the Spark DataFrame into Hive: register a temp view, then CTAS.
spark_df.createOrReplaceTempView('table_test')
spark.sql(
    "create table tmp.table_test SELECT * FROM table_test"
)

 

# Related topics: self-study, python, spark, hive