Basic PySpark DataFrame usage

The annotated script below reads Spark's bundled people.json into a DataFrame and walks through the basics: show() and printSchema(), column selection and arithmetic, filtering, aggregation, and both session-scoped and global temporary views.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar  8 19:10:57 2019

@author: lg
"""

from pyspark.sql import SparkSession

# Root of the local Spark installation; adjust to wherever Spark is unpacked
# so the bundled example data under examples/src/main/resources resolves
upper = '/opt/spark/spark-2.4.0-bin-hadoop2.7/'
# Create (or reuse) the SparkSession; the .config() line is a placeholder
# carried over from the official example and can be dropped
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
# Load the example JSON file into a DataFrame; Spark infers the schema
df = spark.read.json(upper + "examples/src/main/resources/people.json")
# Display the contents of the DataFrame on stdout
df.show()
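# With the stock people.json this prints:
# +----+-------+
# | age|   name|
# +----+-------+
# |null|Michael|
# |  30|   Andy|
# |  19| Justin|
# +----+-------+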

# Print the inferred schema in tree format
df.printSchema()
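# root
#  |-- age: long (nullable = true)
#  |-- name: string (nullable = true)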

df.select("name").show()

# Select everybody's name and increment age by 1 with a Column expression
df.select(df['name'], df['age'] + 1).show()
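# +-------+---------+
# |   name|(age + 1)|
# +-------+---------+
# |Michael|     null|
# |   Andy|       31|
# | Justin|       20|
# +-------+---------+
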
# Keep only rows where age is greater than 21
df.filter(df['age'] > 21).show()
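# Only Andy qualifies: Michael's age is null and Justin is 19
# +---+----+
# |age|name|
# +---+----+
# | 30|Andy|
# +---+----+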

df.groupBy("age").count().show()

# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")

sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
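# Prints the same three rows as df.show() above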

# Register the DataFrame as a global temporary view
df.createGlobalTempView("people")

# Global temporary views live in the system-preserved database `global_temp`,
# so queries must use the qualified name
spark.sql("SELECT * FROM global_temp.people").show()

# Global temporary views are cross-session: a brand-new session still sees
# global_temp.people, while the ordinary temp view `people` is session-scoped
spark.newSession().sql("SELECT * FROM global_temp.people").show()
# +----+-------+
# | age|   name|
# +----+-------+
# |null|Michael|
# |  30|   Andy|
# |  19| Justin|
# +----+-------+

# Shut down the SparkSession and release its resources
spark.stop()
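
If the Spark examples directory isn't available, here is a minimal self-contained sketch of the same pipeline (my addition, not part of the original post): the inline rows mirror people.json, and the schema is declared explicitly so the null age doesn't depend on type inference.

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, LongType

spark = SparkSession.builder.appName("inline people example").getOrCreate()

# Same three rows as people.json, built inline with an explicit schema
schema = StructType([
    StructField("age", LongType(), True),
    StructField("name", StringType(), True),
])
df = spark.createDataFrame(
    [(None, "Michael"), (30, "Andy"), (19, "Justin")], schema)

df.filter(df['age'] > 21).show()  # same single-row result as above
df.createOrReplaceTempView("people")
spark.sql("SELECT name FROM people WHERE age IS NOT NULL").show()

spark.stop()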
