Step 4: HADOOP-SPARK2

scala

$ wget https://downloads.lightbend.com/scala/2.12.11/scala-2.12.11.tgz
$ tar -zxvf scala-2.12.11.tgz -C /usr/local
$ su hadoop
$ cd
$ vim ~/.bashrc
#scala
export SCALA_HOME=/usr/local/scala-2.12.11
export PATH=$PATH:$SCALA_HOME/bin
$ source ~/.bashrc
$ exit
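
As a quick sanity check that the PATH change took effect (note that the prebuilt Spark 2.4.6 package used below ships its own Scala 2.11.12 runtime, as the spark-shell banner later confirms, so this system-wide install mainly serves standalone Scala work):

$ scala -version
# should report: Scala code runner version 2.12.11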

spark

$ wget https://mirrors.tuna.tsinghua.edu.cn/apache/spark/spark-2.4.6/spark-2.4.6-bin-without-hadoop.tgz
$ tar -zxvf spark-2.4.6-bin-without-hadoop.tgz -C /data
$ mv /data/spark-2.4.6-bin-without-hadoop/ /data/spark
$ chown -R hadoop:hadoop /data/spark/
$ su hadoop

Spark configuration files

$ cd /data/spark/conf
$ cp spark-env.sh.template spark-env.sh
$ cp spark-defaults.conf.template spark-defaults.conf
$ cp slaves.template slaves

spark-env.sh

$ vim spark-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_231
export SPARK_MASTER_PORT=7077
export SPARK_MASTER_WEBUI_PORT=18088
export SPARK_WORKER_WEBUI_PORT=18081
export SPARK_WORKER_CORES=2
export SPARK_WORKER_MEMORY=6000m
export LD_LIBRARY_PATH=/data/hadoop/lib/native
export SPARK_DIST_CLASSPATH=$(hadoop classpath)
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_HOME=/data/spark
export SPARK_WORKER_DIR=/data/spark/work
export SPARK_PID_DIR=/tmp
export SPARK_JAR=/data/spark/jars/*.jar
export PATH=$SPARK_HOME/bin:$PATH
export SPARK_CLASSPATH=$SPARK_CLASSPATH:/data/spark/jars/mysql-connector-java-5.1.49-bin.jar
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=192.168.233.17:2181,192.168.233.238:2181,192.168.233.157:2181 -Dspark.deploy.zookeeper.dir=/spark"
export SPARK_HISTORY_OPTS="-Dspark.history.ui.port=18080 -Dspark.history.retainedApplications=30 -Dspark.history.fs.logDirectory=hdfs://hadoop-test-cluster/logs"
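
Because SPARK_DIST_CLASSPATH is filled in from hadoop classpath when the daemons start, it is worth confirming that command resolves for the hadoop user before going further (the exact jar list depends on your Hadoop layout):

$ hadoop classpath
# should print the Hadoop conf dir plus the share/hadoop/*/ jar paths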

spark-defaults.conf

$ vim spark-defaults.conf
spark.master                     spark://192.168.233.65:7077,192.168.233.94:7077
spark.eventLog.enabled           true
spark.eventLog.dir               hdfs://hadoop-test-cluster/logs
spark.serializer                 org.apache.spark.serializer.KryoSerializer
spark.driver.memory              1g
spark.executor.memory            2g
spark.executor.extraJavaOptions  -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three"
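
The event-log settings above are only written by running applications; to browse finished applications you also need the history server, which reads the directory configured via SPARK_HISTORY_OPTS in spark-env.sh. A minimal sketch, to be run only once the cluster is up and the /logs directory (created below) exists:

$ /data/spark/sbin/start-history-server.sh
# its UI then listens on port 18080, per spark.history.ui.port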

slaves

$ vim slaves
192.168.233.17
192.168.233.238
192.168.233.157
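
start-all.sh reaches every host in slaves over SSH, so passwordless login from the master as the hadoop user must already be in place; a quick check against the worker IPs above:

$ for h in 192.168.233.17 192.168.233.238 192.168.233.157; do ssh $h hostname; done
# each host should print its hostname without asking for a password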

Configure the environment on all nodes

$ vim ~/.bashrc
# spark
export SPARK_HOME=/data/spark
export PATH=$SPARK_HOME/bin:$PATH
$ source ~/.bashrc
$ hdfs dfs -mkdir /logs
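
Optionally confirm the directory landed where spark.eventLog.dir expects it:

$ hdfs dfs -ls /
# /logs should be listed, owned by hadoop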

Sync the configuration files to all Spark nodes.
Start the cluster on the primary master node (start-all.sh launches the local master plus every worker listed in slaves):

$ /data/spark/sbin/start-all.sh
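
Verify the daemons with jps on the master and on each worker (the process names come from the standalone scripts):

$ jps
# master node: a Master process (alongside your Hadoop/ZooKeeper daemons)
# worker nodes: a Worker process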

Start the master on the standby node:

$ /data/spark/sbin/start-master.sh
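
The second master should come up in standby mode; its web UI on port 18088 (per SPARK_MASTER_WEBUI_PORT) shows Status: STANDBY until ZooKeeper promotes it. A quick process check on the standby:

$ jps
# should list a Master process here as well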

Test

$ spark-shell --master spark://192.168.233.65:7077 --executor-memory 500m --total-executor-cores 1
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://hadoop-test-2:4040
Spark context available as 'sc' (master = spark://192.168.233.65:7077, app id = app-20200618155948-0001).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.4.6
      /_/
         
Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_231)
Type in expressions to have them evaluated.
Type :help for more information.

scala> 
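
After exiting the shell (:quit), the bundled SparkPi example gives a quick end-to-end smoke test against the standalone master (the trailing 10 is just the slice count):

$ /data/spark/bin/run-example --master spark://192.168.233.65:7077 SparkPi 10
# look for a line like: Pi is roughly 3.14...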

Access the Web UI (the master UI listens on port 18088, per SPARK_MASTER_WEBUI_PORT above)
(screenshot: Spark master Web UI)
