[zkpk@master ~]$ cd /home/zkpk/tgz/spark/
[zkpk@master spark]$ tar -xzvf spark-2.1.1-bin-hadoop2.7.tgz -C /home/zkpk/
[zkpk@master spark]$ cd
[zkpk@master ~]$ cd spark-2.1.1-bin-hadoop2.7/
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ ls -l
Running the ls -l command lists the files and directories that ship with the Spark distribution:
[zkpk@master ~]$ cd
[zkpk@master ~]$ gedit ~/.bash_profile
#SPARK ON YARN
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export HDFS_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
[zkpk@master ~]$ source ~/.bash_profile
Note: modify this file the same way on the master, slave01, and slave02 nodes.
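Once the variables are sourced, you can sanity-check them later from inside spark-shell. A minimal sketch (plain Scala; the three variable names are exactly the ones exported above):

```scala
// Print the Hadoop/YARN config paths Spark will pick up;
// "<not set>" means the corresponding export is missing on this node
Seq("HADOOP_CONF_DIR", "HDFS_CONF_DIR", "YARN_CONF_DIR").foreach { k =>
  println(s"$k -> ${sys.env.getOrElse(k, "<not set>")}")
}
```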
[zkpk@master ~]$ vim ~/hadoop-2.7.3/etc/hadoop/yarn-site.xml
<property>
  <name>yarn.nodemanager.pmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>
Note: these two properties disable YARN's physical- and virtual-memory checks, which otherwise tend to kill Spark containers on machines with little memory.
![yarn-site.xml memory-check settings](https://img-blog.csdnimg.cn/30b25836994545c191442ff18f227621.png)
[zkpk@master ~]$ stop-all.sh
[zkpk@master ~]$ start-all.sh
[zkpk@master ~]$ cd ~/spark-2.1.1-bin-hadoop2.7
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ ./bin/spark-submit --class org.apache.spark.examples.SparkPi --master yarn --num-executors 3 --driver-memory 1g --executor-memory 1g --executor-cores 1 examples/jars/spark-examples*.jar 10
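This submits Spark's bundled SparkPi example to YARN with 3 executors of 1 GB each; the final argument (10) is the number of slices. SparkPi estimates π by Monte Carlo sampling: it throws random points at the unit square and counts those landing inside the unit circle. A minimal sketch of the same idea, pasteable into spark-shell (where sc is the shell's built-in SparkContext):

```scala
// Monte-Carlo estimate of pi: sample random points in [-1, 1] x [-1, 1]
// and count how many fall inside the unit circle
val slices = 10
val n = 100000 * slices
val inside = sc.parallelize(1 to n, slices).map { _ =>
  val x = math.random * 2 - 1
  val y = math.random * 2 - 1
  if (x * x + y * y <= 1) 1 else 0
}.reduce(_ + _)
println(s"Pi is roughly ${4.0 * inside / n}")
```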
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ ./bin/spark-shell
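Before opening the web UI below, a quick smoke test inside the shell confirms that jobs execute; the run will then appear as a completed job in the UI. A minimal sketch using only the shell's built-in sc:

```scala
// Tiny distributed job: double each number and sum; expect 1001000.0
val doubledSum = sc.parallelize(1 to 1000).map(_ * 2).sum()
println(doubledSum)
```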
4.5.3.2 Open a browser and enter the following address to view the Spark web UI (address: http://master:4040/)
scala> :quit
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ cd
[zkpk@master ~]$ cd hadoop-2.7.3/etc/hadoop/
[zkpk@master hadoop]$ cp hdfs-site.xml /home/zkpk/spark-2.1.1-bin-hadoop2.7/conf
[zkpk@master hadoop]$ cd
[zkpk@master ~]$ cd apache-hive-2.1.1-bin/conf/
[zkpk@master conf]$ cp hive-site.xml /home/zkpk/spark-2.1.1-bin-hadoop2.7/conf/
Note: Spark SQL reads hdfs-site.xml and hive-site.xml from its conf directory, which is how it reaches HDFS and reuses Hive's metastore.
[zkpk@master conf]$ cd
[zkpk@master ~]$ cd spark-2.1.1-bin-hadoop2.7/conf/
[zkpk@master conf]$ vim hive-site.xml
<property>
  <name>hive.metastore.warehouse.dir</name>
  <value>/user/spark/warehouse</value>
</property>
[zkpk@master conf]$ cd
[zkpk@master ~]$ cd apache-hive-2.1.1-bin/lib/
[zkpk@master lib]$ cp mysql-connector-java-5.1.28.jar /home/zkpk/spark-2.1.1-bin-hadoop2.7/jars/
Note: the MySQL JDBC driver must be on Spark's classpath because the Hive metastore configured in hive-site.xml stores its data in MySQL.
[zkpk@master lib]$ cd
[zkpk@master ~]$ stop-all.sh
[zkpk@master ~]$ start-all.sh
[zkpk@master ~]$ cd ~/spark-2.1.1-bin-hadoop2.7
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ ./bin/spark-sql --master yarn
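At the spark-sql> prompt you can enter HiveQL statements directly. The same statements can also be issued from spark-shell through the built-in SparkSession spark; a minimal sketch (the table name t_demo is made up for illustration):

```scala
// Runs against the shared Hive metastore; managed tables land under
// hive.metastore.warehouse.dir (/user/spark/warehouse) on HDFS
spark.sql("SHOW DATABASES").show()
spark.sql("CREATE TABLE IF NOT EXISTS t_demo (id INT, name STRING)")  // hypothetical table
spark.sql("SHOW TABLES").show()
```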
[zkpk@master spark-2.1.1-bin-hadoop2.7]$ cd
[zkpk@master ~]$ stop-all.sh