Hadoop+HBase+Spark Cluster Configuration - Spark HA Installation



Unpack and deploy
cp /usr/hadoop/spark/conf/spark-env.sh.template /usr/hadoop/spark/conf/spark-env.sh
vi /usr/hadoop/spark/conf/spark-env.sh
#####################################################
export JAVA_HOME=/usr/hadoop/jdk
# Memory available to each Worker (applies globally)
export SPARK_WORKER_MEMORY=1g
# Recovery mode: use ZooKeeper for master HA (the default is NONE).
# These are Java system properties, so they must be passed via SPARK_DAEMON_JAVA_OPTS rather than exported directly.
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 -Dspark.deploy.zookeeper.dir=/spark"
# The settings below are only needed when running Spark on YARN; a standalone cluster does not need them
export HADOOP_HOME=/usr/hadoop
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
#####################################################
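With recoveryMode set to ZOOKEEPER, the active master writes its state (registered workers, applications, drivers) under the znode named by spark.deploy.zookeeper.dir, and the standby master uses the same path for leader election and takeover. Once the masters are running, a quick sanity check is to list that znode with the ZooKeeper CLI; the ZooKeeper install path below is an assumption and may differ on your nodes.

/usr/hadoop/zookeeper/bin/zkCli.sh -server zookeeper1:2181   # assumed install path
ls /spark    # run inside the zkCli shell; expect children such as leader_election and master_status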

cp /usr/hadoop/spark/conf/slaves.template /usr/hadoop/spark/conf/slaves
vi /usr/hadoop/spark/conf/slaves
#####################################################
slave147
slave149
#####################################################

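The start scripts contact every host listed in slaves over SSH, so passwordless login from the master to those nodes should already be in place (normally set up earlier in the Hadoop installation). A quick check before distributing Spark, as a sketch:

for h in slave147 slave149; do ssh $h hostname; done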
Copy to the other nodes
for ip in `seq 147 149`;do scp -r /usr/hadoop/spark slave$ip:/usr/hadoop/;done
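With the directory distributed, the HA cluster can be brought up. The sketch below assumes two master nodes, written here as master145 and master146; this section never names the master hosts, so substitute your own.

# On the primary master: starts this master plus every worker listed in conf/slaves
/usr/hadoop/spark/sbin/start-all.sh
# On the standby master: starts only a second master process
/usr/hadoop/spark/sbin/start-master.sh

One master web UI (port 8080 by default) should show Status: ALIVE and the other STANDBY; stopping the ALIVE master should let the standby take over after the ZooKeeper session timeout.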
