192.168.174.128 hadoop01
192.168.174.129 hadoop02
192.168.174.131 hadoop03
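The same /etc/hosts entries need to be present on every node; they can be copied over and verified, for example (assuming root SSH access between the nodes):
scp /etc/hosts root@hadoop02:/etc/hosts
scp /etc/hosts root@hadoop03:/etc/hosts
ping -c 1 hadoop02
ping -c 1 hadoop03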
JAVA_HOME=/usr/local/java/jdk1.8.0_201
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME PATH
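A quick check that the JDK settings in /etc/profile took effect:
source /etc/profile
echo $JAVA_HOME
java -version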
server.1=xxx.xxx.xxx.xxx:2888:3888
server.2=xxx.xxx.xxx.xxx:2888:3888
server.3=xxx.xxx.xxx.xxx:2888:3888
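Each ZooKeeper node also needs a myid file under its dataDir whose number matches its server.N entry; a minimal sketch, assuming dataDir is /home/software/zookeeper/data (use the dataDir actually set in zoo.cfg):
echo 1 > /home/software/zookeeper/data/myid   # 2 on the second node, 3 on the third
# then on every node, from ZooKeeper's bin directory:
sh zkServer.sh start
sh zkServer.sh status   # one node should report leader, the others follower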
export JAVA_HOME=/usr/local/java/jdk1.8.0_201
export HADOOP_CONF_DIR=/home/software/hadoop-2.7.1/etc/hadoop
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/software/hadoop-2.7.1/tmp</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns</value>
</property>
<property>
<name>dfs.ha.namenodes.ns</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn1</name>
<value>hadoop01:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn1</name>
<value>hadoop01:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn2</name>
<value>hadoop02:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn2</name>
<value>hadoop02:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/ns</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/software/hadoop-2.7.1/tmp/journal</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.ns</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///home/software/hadoop-2.7.1/tmp/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///home/software/hadoop-2.7.1/tmp/hdfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop01</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop03</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>ns-yarn</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop03</value>
</property>
hadoop01
hadoop02
hadoop03
# with ZooKeeper already running on all three nodes, format the failover state in ZooKeeper (on hadoop01)
hdfs zkfc -formatZK
# start the JournalNodes (on hadoop01, hadoop02 and hadoop03)
hadoop-daemon.sh start journalnode
# format and start the first NameNode (on hadoop01)
hadoop namenode -format
hadoop-daemon.sh start namenode
# sync the metadata to the standby NameNode and start it (on hadoop02)
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
# start the DataNodes (on hadoop01, hadoop02 and hadoop03)
hadoop-daemon.sh start datanode
# start a ZKFC on each NameNode host (hadoop01 and hadoop02)
hadoop-daemon.sh start zkfc
# start YARN (on hadoop01, i.e. rm1), then the second ResourceManager (on hadoop03, i.e. rm2)
start-yarn.sh
yarn-daemon.sh start resourcemanager
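Once everything is started, the HA state can be checked from any node, for example:
jps                                  # each node should show its expected daemons
hdfs haadmin -getServiceState nn1    # expected: active
hdfs haadmin -getServiceState nn2    # expected: standby
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2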
export JAVA_HOME=/usr/local/java/jdk1.8.0_201
export HADOOP_CONF_DIR=/home/software/hadoop-2.7.1/etc/hadoop
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop01:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/software/hadoop-2.7.1/tmp</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop01</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
hadoop01
export HADOOP_HOME=/home/software/hadoop-2.7.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
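With the environment variables in place, the pseudo-distributed cluster can be formatted and started, for example:
hadoop version           # confirm the PATH change took effect
hadoop namenode -format  # first start only
start-dfs.sh
start-yarn.sh
jps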
NameNode: 50070
DataNode: 50075
SecondaryNameNode: 50090
YARN (ResourceManager): 8088
a1.sources=r1
a1.channels=c1
a1.sinks=s1
a1.sources.r1.type=spooldir
a1.sources.r1.spoolDir=/home/zebra
a1.sources.r1.interceptors=i1
a1.sources.r1.interceptors.i1.type=timestamp
a1.sinks.s1.type=hdfs
a1.sinks.s1.hdfs.path=hdfs://192.168.150.137:9000/zebra/reportTime=%Y-%m-%d
a1.sinks.s1.hdfs.fileType=DataStream
a1.sinks.s1.hdfs.rollInterval=30
a1.sinks.s1.hdfs.rollSize=0
a1.sinks.s1.hdfs.rollCount=0
a1.channels.c1.type=memory
a1.sources.r1.channels=c1
a1.sinks.s1.channel=c1
../bin/flume-ng agent -n a1 -c ./ -f ./my.conf -Dflume.root.logger=INFO,console
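To test the agent, drop a file into the spooling directory and check that it shows up in HDFS; a quick sketch using the paths from the configuration above:
echo "hello flume" > /home/zebra/test.log
hdfs dfs -ls hdfs://192.168.150.137:9000/zebra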
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://hadoop01:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
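After MySQL is set up (next section) and the MySQL JDBC driver jar has been copied into Hive's lib directory, the metastore connection can be verified by starting the Hive CLI, for example:
hive
hive> show databases;   # the hive database in MySQL should now contain the metastore tables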
Download the MySQL installation package
Check whether MySQL is already installed on the current virtual machine
Remove the existing MySQL installation
Add a mysql group and create a mysql user
groupadd mysql
useradd -r -g mysql mysql
Install the MySQL server RPM package and the client RPM package
Note the directories the MySQL files are installed into
Edit my.cnf, which is located at /usr/my.cnf by default
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
[mysqld]
character_set_server=utf8
Register mysqld as a system service and have it start on boot
Start mysqld
View the randomly generated initial password
Change the initial password (see the command sketch below)
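A rough sketch of the commands for the steps above with a MySQL 5.6 community RPM install (package file names, the service name and the password file location vary by version, so treat these as examples):
rpm -qa | grep -i mysql                 # check for an existing installation
rpm -e --nodeps mysql-libs              # remove whatever the previous command reported
rpm -ivh MySQL-server-5.6.*.rpm
rpm -ivh MySQL-client-5.6.*.rpm
chkconfig mysql on                      # the service may be named mysql or mysqld depending on the package
service mysql start
cat /root/.mysql_secret                 # the randomly generated initial root password
mysql -uroot -p                         # log in with it, then change it, e.g.:
#   SET PASSWORD = PASSWORD('root');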
export JAVA_HOME=xxxx
export HBASE_MANAGES_ZK=false
<property>
<name>hbase.rootdir</name>
<value>hdfs://hadoop01:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- Configure the ZooKeeper connection addresses and port -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
</property>
hadoop01
hadoop02
hadoop03
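With HDFS and ZooKeeper already running, HBase is started from the HMaster node and can be checked from the shell, for example:
sh start-hbase.sh
jps            # HMaster / HRegionServer should appear
hbase shell    # then run: status, list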
# Modify the following settings
broker.id=0
log.dirs=/home/software/kafka/kafka-logs
zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181
# Add the following settings
delete.topic.enable=true
advertised.host.name=192.168.234.21
advertised.port=9092
Run: sh kafka-server-start.sh ../config/server.properties
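Once the broker is up on each node, a quick end-to-end check with the bundled scripts (the topic name test is just an example; these are the ZooKeeper-based tools of this Kafka generation):
sh kafka-topics.sh --create --zookeeper hadoop01:2181 --replication-factor 1 --partitions 1 --topic test
sh kafka-topics.sh --list --zookeeper hadoop01:2181
sh kafka-console-producer.sh --broker-list hadoop01:9092 --topic test
sh kafka-console-consumer.sh --zookeeper hadoop01:2181 --topic test --from-beginning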
# Note: configuration entries must begin with a space, and every colon must be followed by a space, otherwise startup will fail
storm.zookeeper.servers:
- "hadoop01"
- "hadoop02"
- "hadoop03"
nimbus.host: "hadoop01"
storm.local.dir: "/home/software/apache-storm-0.9.3/tmp"
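After distributing the same storm.yaml to all three nodes, the daemons can be started in the background from Storm's bin directory, for example:
# on hadoop01
nohup ./storm nimbus &
nohup ./storm ui &
# on hadoop01, hadoop02 and hadoop03
nohup ./storm supervisor &
jps    # nimbus / supervisor processes should appear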
# hadoop01 is the hostname of the current machine; an IP address can also be used
SPARK_LOCAL_IP=hadoop01
sh spark-shell --master=local
Upload and extract the Spark installation package
Enter the conf directory under the Spark installation directory
Configure the spark-env.sh file
SPARK_LOCAL_IP=hadoop01
# Spark's shuffle stage produces temporary files; this setting specifies the directory they are stored in. If not set, the default is /tmp
SPARK_LOCAL_DIRS=/home/software/spark/tmp
export JAVA_HOME=/home/software/jdk1.8
In the conf directory, edit the slaves file
hadoop01
hadoop02
hadoop03
After configuration, copy the Spark directory to the other nodes and change each node's SPARK_LOCAL_IP accordingly
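With the slaves file distributed, the standalone cluster is started from the master node's Spark sbin directory, and spark-shell can then attach to it (7077 is the default master port, 8080 the default master web UI port):
cd /home/software/spark/sbin
sh start-all.sh    # starts a Master here and a Worker on every host listed in slaves
jps                # Master / Worker should appear
sh ../bin/spark-shell --master spark://hadoop01:7077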