[root@CentOS ~]# vi /etc/security/limits.conf
* soft nofile 204800
* hard nofile 204800
* soft nproc 204800
* hard nproc 204800
[root@CentOS ~]# reboot
[root@CentOS ~]# vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=CentOS
[root@CentOS ~]# reboot
[root@CentOS ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.111.132 CentOS
[root@CentOS ~]# service iptables stop
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Flushing firewall rules: [ OK ]
iptables: Unloading modules: [ OK ]
[root@CentOS ~]# chkconfig iptables off
[root@CentOS ~]# rpm -ivh jdk-8u191-linux-x64.rpm
warning: jdk-8u191-linux-x64.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
Preparing... ########################################### [100%]
1:jdk1.8 ########################################### [100%]
Unpacking JAR files...
tools.jar...
plugin.jar...
javaws.jar...
deploy.jar...
rt.jar...
jsse.jar...
charsets.jar...
localedata.jar...
[root@CentOS ~]# vi .bashrc
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
[root@CentOS ~]# source ~/.bashrc
[root@CentOS ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
a5:2d:f5:c3:22:83:cf:13:25:59:fb:c1:f4:63:06:d4 root@CentOS
The key's randomart image is:
+--[ RSA 2048]----+
| ..+. |
| o + oE |
| o = o = |
| . B + + . |
| . S o = |
| o = . . |
| + |
| . |
| |
+-----------------+
[root@CentOS ~]# ssh-copy-id CentOS
The authenticity of host 'centos (192.168.111.132)' can't be established.
RSA key fingerprint is fa:1b:c0:23:86:ff:08:5e:83:ba:65:4c:e6:f2:1f:3b.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'centos,192.168.111.132' (RSA) to the list of known hosts.
root@centos's password: (enter the root password here)
Now try logging into the machine, with "ssh 'CentOS'", and check in:
.ssh/authorized_keys
to make sure we haven't added extra keys that you weren't expecting.
[root@CentOS ~]# vi /usr/hadoop-2.9.2/etc/hadoop/core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://CentOS:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/hadoop-2.9.2/hadoop-${user.name}</value>
</property>
[root@CentOS ~]# vi /usr/hadoop-2.9.2/etc/hadoop/hdfs-site.xml
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>CentOS:50090</value>
</property>
<!-- dfs.datanode.max.xcievers is the historical (misspelled) name of the DataNode transfer-thread cap -->
<property>
    <name>dfs.datanode.max.xcievers</name>
    <value>4096</value>
</property>
<property>
    <name>dfs.datanode.handler.count</name>
    <value>6</value>
</property>
[root@CentOS ~]# vi /usr/hadoop-2.9.2/etc/hadoop/slaves
CentOS
[root@CentOS ~]# vi .bashrc
HADOOP_HOME=/usr/hadoop-2.9.2
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
export HADOOP_HOME
[root@CentOS ~]# source .bashrc
[root@CentOS ~]# hdfs namenode -format # creates the initial fsimage the NameNode needs
[root@CentOS ~]# start-dfs.sh
Download spark-2.4.3-bin-without-hadoop.tgz from the official site, extract it to /usr, rename the directory to spark-2.4.3, and then edit spark-env.sh and the slaves file. Because this build ships without Hadoop jars, spark-env.sh must put the output of hadoop classpath on SPARK_DIST_CLASSPATH.
[root@CentOS ~]# tar -zxf spark-2.4.3-bin-without-hadoop.tgz -C /usr/
[root@CentOS ~]# mv /usr/spark-2.4.3-bin-without-hadoop/ /usr/spark-2.4.3
[root@CentOS ~]# vi .bashrc
SPARK_HOME=/usr/spark-2.4.3
HADOOP_HOME=/usr/hadoop-2.9.2
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$SPARK_HOME/sbin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
export HADOOP_HOME
export SPARK_HOME
[root@CentOS ~]# source .bashrc
[root@CentOS spark-2.4.3]# cd /usr/spark-2.4.3/conf/
[root@CentOS conf]# mv spark-env.sh.template spark-env.sh
[root@CentOS conf]# mv slaves.template slaves
[root@CentOS conf]# vi slaves
CentOS
[root@CentOS conf]# vi spark-env.sh
SPARK_MASTER_HOST=CentOS
SPARK_MASTER_PORT=7077
SPARK_WORKER_CORES=4
SPARK_WORKER_MEMORY=2g
LD_LIBRARY_PATH=/usr/hadoop-2.9.2/lib/native
SPARK_DIST_CLASSPATH=$(hadoop classpath)
export SPARK_MASTER_HOST
export SPARK_MASTER_PORT
export SPARK_WORKER_CORES
export SPARK_WORKER_MEMORY
export LD_LIBRARY_PATH
export SPARK_DIST_CLASSPATH
[root@CentOS ~]# cd /usr/spark-2.4.3/
[root@CentOS spark-2.4.3]# ./sbin/start-all.sh
starting org.apache.spark.deploy.master.Master, logging to /usr/spark-2.4.3/logs/spark-root-org.apache.spark.deploy.master.Master-1-CentOS.out
CentOS: starting org.apache.spark.deploy.worker.Worker, logging to /usr/spark-2.4.3/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-CentOS.out
[root@CentOS spark-2.4.3]# ./bin/spark-shell \
--master spark://CentOS:7077 \
--deploy-mode client \
--executor-cores <n>
--executor-cores: in Standalone mode this is the number of cores the application claims on each Worker node; it cannot exceed the core count of a single machine. If you are unsure how many cores each Worker can offer, use --total-executor-cores instead, which grabs as many cores as possible across the cluster.
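The same allocation can be expressed in code. A minimal sketch, assuming the Standalone master above (the app name is hypothetical); spark.executor.cores and spark.cores.max are the configuration keys behind --executor-cores and --total-executor-cores:

import org.apache.spark.{SparkConf, SparkContext}

// Sketch: at most 2 cores per executor, at most 6 cores for the whole application.
val conf = new SparkConf()
  .setMaster("spark://CentOS:7077")
  .setAppName("core-allocation-demo")   // hypothetical app name
  .set("spark.executor.cores", "2")     // equivalent of --executor-cores 2
  .set("spark.cores.max", "6")          // equivalent of --total-executor-cores 6
val sc = new SparkContext(conf)
sc.stop()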
Test:
scala> sc.textFile("hdfs:///words/t_words",5)
.flatMap(_.split(" "))
.map((_,1))
.reduceByKey(_+_)
.sortBy(_._1,true,3)
.saveAsTextFile("hdfs:///results")
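To spot-check the result, read the output directory back in the same shell (a quick sketch; /results matches the saveAsTextFile path above):

scala> sc.textFile("hdfs:///results").take(10).foreach(println)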
Spark on YARN. The base system setup (limits.conf, hostname, /etc/hosts, firewall, JDK, passwordless SSH) and the Hadoop core-site.xml, hdfs-site.xml, and slaves files are identical to the Standalone walkthrough above; only the YARN pieces are new. Configure YARN and MapReduce:
[root@CentOS ~]# vi /usr/hadoop-2.9.2/etc/hadoop/yarn-site.xml
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>CentOS</value>
</property>
<!-- disable physical/virtual memory checks so containers on a small VM are not killed -->
<property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
</property>
[root@CentOS ~]# vi /usr/hadoop-2.9.2/etc/hadoop/mapred-site.xml
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
With HDFS already formatted and running from the steps above, start YARN:
[root@CentOS ~]# start-yarn.sh
Spark itself is installed exactly as in the Standalone section (tar, rename, .bashrc entries); for YARN mode only the contents of spark-env.sh and spark-defaults.conf differ:
[root@CentOS conf]# vi spark-env.sh
HADOOP_CONF_DIR=/usr/hadoop-2.9.2/etc/hadoop
YARN_CONF_DIR=/usr/hadoop-2.9.2/etc/hadoop
SPARK_EXECUTOR_CORES=4
SPARK_EXECUTOR_MEMORY=2G
SPARK_DRIVER_MEMORY=1G
LD_LIBRARY_PATH=/usr/hadoop-2.9.2/lib/native
SPARK_DIST_CLASSPATH=$(hadoop classpath):$SPARK_DIST_CLASSPATH
SPARK_HISTORY_OPTS="-Dspark.history.fs.logDirectory=hdfs:///spark-logs"
export HADOOP_CONF_DIR
export YARN_CONF_DIR
export SPARK_EXECUTOR_CORES
export SPARK_DRIVER_MEMORY
export SPARK_EXECUTOR_MEMORY
export LD_LIBRARY_PATH
export SPARK_DIST_CLASSPATH
export SPARK_HISTORY_OPTS
[root@CentOS conf]# mv spark-defaults.conf.template spark-defaults.conf
[root@CentOS conf]# vi spark-defaults.conf
spark.eventLog.enabled=true
spark.eventLog.dir=hdfs:///spark-logs
Create a spark-logs directory on HDFS for the Spark history server to store its data; it must match the spark.eventLog.dir and spark.history.fs.logDirectory settings above.
[root@CentOS ~]# hdfs dfs -mkdir /spark-logs
[root@CentOS spark-2.4.3]# ./sbin/start-history-server.sh
starting org.apache.spark.deploy.history.HistoryServer, logging to /usr/spark-2.4.3/logs/spark-root-org.apache.spark.deploy.history.HistoryServer-1-CentOS.out
[root@CentOS spark-2.4.3]# jps
5728 NodeManager
5090 NameNode
5235 DataNode
10531 Jps
5623 ResourceManager
5416 SecondaryNameNode
10459 HistoryServer
The history server starts an embedded web UI on port 18080, where you can inspect the execution plans and history of completed jobs.
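From a running shell you can confirm that event logging is actually on (a quick check; getBoolean returns false if spark-defaults.conf was not picked up):

scala> sc.getConf.getBoolean("spark.eventLog.enabled", false)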
./bin/spark-shell \
--master yarn \
--deploy-mode client \
--num-executors 2 \
--executor-cores 3
--num-executors: in YARN mode, the number of executor processes requested from the NodeManagers; --executor-cores is the number of cores (concurrent task threads) each of those processes may run.
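A quick way to confirm the allocation from inside the shell (a sketch; getExecutorMemoryStatus also counts the driver, so expect --num-executors + 1 entries):

scala> sc.getExecutorMemoryStatus.size   // 3 here: 2 executors + the driver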
scala> sc.textFile("hdfs:///words/t_words",5)
.flatMap(_.split(" "))
.map((_,1))
.reduceByKey(_+_)
.sortBy(_._1,true,3)
.saveAsTextFile("hdfs:///results")
Local mode requires neither a YARN installation nor a running Standalone cluster; everything is simulated inside a single JVM.
[root@CentOS spark-2.4.3]# ./bin/spark-shell --master local[5]
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://CentOS:4040
Spark context available as 'sc' (master = local[5], app id = local-1561742649329).
Spark session available as 'spark'.
Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/___/ .__/\_,_/_/ /_/\_\ version 2.4.3
/_/
Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_191)
Type in expressions to have them evaluated.
Type :help for more information.
scala> sc.textFile("hdfs:///words/t_words").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).sortBy(_._1,false,3).saveAsTextFile("hdfs:///results1/")
scala>
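The N in local[N] is the number of worker threads and also becomes the shell's default parallelism, which you can verify directly (this should report 5 for local[5]):

scala> sc.defaultParallelism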
To build the same jobs in an IDE, the pom.xml needs the Spark core dependency and the scala-maven-plugin:

<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.4.3</version>
</dependency>

<plugin>
    <groupId>net.alchim31.maven</groupId>
    <artifactId>scala-maven-plugin</artifactId>
    <version>4.0.1</version>
    <executions>
        <execution>
            <id>scala-compile-first</id>
            <phase>process-resources</phase>
            <goals>
                <goal>add-source</goal>
                <goal>compile</goal>
            </goals>
        </execution>
    </executions>
</plugin>
SparkRDDWordCount (local)
Write an object:
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object WordCount {
  def main(args: Array[String]): Unit = {
    // create the SparkContext
    val conf = new SparkConf().setMaster("local[10]").setAppName("wordcount")
    val sc = new SparkContext(conf)
    val lineRDD: RDD[String] = sc.textFile("file:///E:/words/t_words.txt")
    lineRDD.flatMap(line => line.split(" "))
      .map((_, 1))
      .groupByKey()
      .map(tuple => (tuple._1, tuple._2.sum))
      .sortBy(tuple => tuple._2)
      .collect()
      .foreach(tuple => println(tuple._1 + "=" + tuple._2))
    // stop the SparkContext
    sc.stop()
  }
}
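A note on the shuffle: groupByKey ships every (word, 1) pair across the network before summing, while reduceByKey pre-aggregates on the map side; an equivalent, more idiomatic pipeline for the body above:

lineRDD.flatMap(_.split(" "))
  .map((_, 1))
  .reduceByKey(_ + _)   // combines per-partition counts before the shuffle
  .sortBy(_._2)
  .collect()
  .foreach { case (w, n) => println(w + "=" + n) }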
Cluster (YARN) — the same object, changing only the master and the input path:
// 1. create the SparkContext
val conf = new SparkConf().setMaster("yarn").setAppName("wordcount")
val sc = new SparkContext(conf)
// 2. describe and run the computation
val lineRDD: RDD[String] = sc.textFile("hdfs:///words/t_words")
lineRDD.flatMap(line => line.split(" "))
  .map(word => (word, 1))
  .groupByKey()
  .map(tuple => (tuple._1, tuple._2.sum))
  .sortBy(tuple => tuple._2, false, 1)
  .collect()
  .foreach(tuple => println(tuple._1 + "->" + tuple._2))
// 3. stop the SparkContext
sc.stop()
Submit:
[root@CentOS spark-2.4.3]# ./bin/spark-submit --master yarn --deploy-mode client --class com.baizhi.demo02.SparkRDDWordCount --num-executors 3 --executor-cores 4 /root/sparkrdd-1.0-SNAPSHOT.jar
Cluster (Standalone) — identical apart from the master URL:
// 1. create the SparkContext
val conf = new SparkConf().setMaster("spark://CentOS:7077").setAppName("wordcount")
val sc = new SparkContext(conf)
// 2. describe and run the computation
val lineRDD: RDD[String] = sc.textFile("hdfs:///words/t_words")
lineRDD.flatMap(line => line.split(" "))
  .map(word => (word, 1))
  .groupByKey()
  .map(tuple => (tuple._1, tuple._2.sum))
  .sortBy(tuple => tuple._2, false, 1)
  .collect()
  .foreach(tuple => println(tuple._1 + "->" + tuple._2))
// 3. stop the SparkContext
sc.stop()
Submit:
[root@CentOS spark-2.4.3]# ./bin/spark-submit --master spark://CentOS:7077 --deploy-mode client --class com.baizhi.demo02.SparkRDDWordCount --total-executor-cores 4 /root/sparkrdd-1.0-SNAPSHOT.jar