Big Data Component Installation Notes
Shell scripts
Script to view the Java processes on every machine
#!/bin/bash
# Print the running Java processes on every node: jps locally on cdh01,
# then over ssh on cdh02 and cdh03.
echo "-----------------cdh01-------------------------"
/opt/apps/jdk1.8.0_45/bin/jps
echo ""
for ((i = 2; i <= 3; i++)); do
    echo "-----------------cdh0$i------------------------"
    ssh cdh0$i "/opt/apps/jdk1.8.0_45/bin/jps"
    echo ""
done
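To install and run it (assuming it is saved as /root/bin/jps.sh, as referenced below):
chmod +x /root/bin/jps.sh
/root/bin/jps.sh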
Script to start/stop ZooKeeper on multiple machines
vi myzkserver.sh
chmod +x myzkserver.sh
#!/bin/bash
# Start, check, or stop ZooKeeper on all three nodes.
# Usage: myzkserver.sh {start|status|stop}
case "$1" in
start)
    for myhost in cdh01 cdh02 cdh03; do
        echo "---------------$myhost---------------"
        ssh $myhost > /dev/null 2>&1 << eof
/opt/apps/zookeeper-3.4.5-cdh5.7.6/bin/zkServer.sh start
exit
eof
    done
    ;;
status)
    for ((i = 1; i <= 3; i++)); do
        echo "---------------cdh0$i---------------"
        ssh -Tq cdh0$i 2>/dev/null << eof
/opt/apps/zookeeper-3.4.5-cdh5.7.6/bin/zkServer.sh status
exit
eof
    done
    ;;
stop)
    for ((i = 1; i <= 3; i++)); do
        echo "---------------cdh0$i---------------"
        ssh cdh0$i "/opt/apps/zookeeper-3.4.5-cdh5.7.6/bin/zkServer.sh stop" > /dev/null 2>&1
    done
    ;;
*)
    echo "Usage: $0 {start|status|stop}"
    ;;
esac
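Typical usage, matching the three branches above:
./myzkserver.sh start
./myzkserver.sh status
./myzkserver.sh stop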
Scripts such as jps.sh above are kept under /root/bin (e.g. /root/bin/jps.sh).
Environment variables (/etc/profile):
# environment
#export JAVA_HOME=/opt/apps/jdk1.8.0_45
export JAVA_HOME=/opt/apps/jdk1.8.0_261
export HADOOP_HOME=/opt/apps/hadoop-2.8.1
#export HADOOP_HOME=/opt/apps/hadoop-2.7.6
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
#export HIVE_HOME=/opt/apps/hive-1.1.0-cdh5.7.6
export HIVE_HOME=/opt/apps/hive-1.2.1
export HBASE_HOME=/opt/apps/hbase-1.2.0-cdh5.7.6
export ZOOKEEPER_HOME=/opt/apps/zookeeper-3.4.10
export FLUME_HOME=/opt/apps/flume-1.6.0-cdh5.7.6
#export SQOOP_HOME=/opt/apps/sqoop-1.4.6-cdh5.7.6
export SQOOP_HOME=/opt/apps/sqoop-1.4.7
export AZKABAN_HOME=/opt/apps/azkaban-solo-server-0.1.0
export ANT_HOME=/opt/apps/ant-1.8.1
export M2_HOME=/opt/apps/maven-3.3.9
export SCALA_HOME=/opt/apps/scala-2.11.8
export SPARK_HOME=/opt/apps/spark-2.2.0
export OOZIE_HOME=/opt/apps/oozie-4.1.0-cdh5.7.6
export KAFKA_HOME=/opt/apps/kafka-2.11
export REDIS_HOME=/opt/apps/redis-3.2.8
export REDIS_CONF=$REDIS_HOME/conf
export ELASTICSEARCH_HOME=/opt/apps/elasticsearch-6.5.3
export FLINK_HOME=/opt/apps/flink-1.9.1
export KIBANA_HOME=/opt/apps/kibana-6.5.3
export PRESTO_HOME=/opt/apps/presto-server
export PATH=$PATH:$KIBANA_HOME/bin:$FLINK_HOME/bin:$OOZIE_HOME/bin
#export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export CLASSPATH=.:$JAVA_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HBASE_HOME/bin
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$FLUME_HOME/bin:$SQOOP_HOME/bin:$AZKABAN_HOME/bin
export PATH=$PATH:$ANT_HOME/bin:$M2_HOME/bin:$SCALA_HOME/bin:$SPARK_HOME/sbin:$SPARK_HOME/bin
export PATH=$PATH:$KAFKA_HOME/bin:$REDIS_HOME/bin:$ELASTICSEARCH_HOME/bin:$PRESTO_HOME/bin
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/apps/hbase-1.2.0-cdh5.7.6/lib/*
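After editing /etc/profile, reload it and spot-check a few of the variables:
source /etc/profile
echo $JAVA_HOME $HADOOP_HOME
java -version
hadoop version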
Configure a static IP address:
vi /etc/sysconfig/network-scripts/ifcfg-ens33
BOOTPROTO="static" # use a static IP address; the default is dhcp
IPADDR="192.168.10.101" # the static IP address
NETMASK="255.255.255.0" # subnet mask
GATEWAY="192.168.10.2" # gateway address
DNS1=8.8.8.8 # DNS servers
DNS2=8.8.4.4
DNS3=114.114.114.114
ONBOOT="yes" # bring the interface up at boot
systemctl restart network
hostnamectl set-hostname cdh01
ip addr
Ping an external site and the host IP:
ping www.baidu.com
ping <host IP address>
reboot
yum -y install ifconfig # fails: ifconfig is not a package name
yum search ifconfig # shows that ifconfig is provided by net-tools
# install it directly
yum -y install net-tools.x86_64
yum -y install vim
yum -y install ntpdate
yum -y install ntp
# sync the time with ntpdate:
ntpdate -u time.windows.com
systemctl start ntpd
systemctl enable ntpd
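If ntpd is not used, a periodic ntpdate in cron keeps the clocks aligned (a sketch; the /usr/sbin/ntpdate path and the 10-minute interval are assumptions):
crontab -e
# sync the clock every 10 minutes
*/10 * * * * /usr/sbin/ntpdate -u time.windows.com > /dev/null 2>&1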
Stop the firewall:
systemctl stop firewalld
systemctl disable firewalld
Re-enable it and check its status:
systemctl enable firewalld
systemctl status firewalld
# Disable NetworkManager
Common management commands:
systemctl status NetworkManager
systemctl start NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl enable NetworkManager
vi /etc/hosts
192.168.10.101 cdh01
192.168.10.102 cdh02
192.168.10.103 cdh03
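With /etc/hosts in place on every node, a quick reachability check over the hostnames above:
for h in cdh01 cdh02 cdh03; do ping -c 1 $h; done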
1. Unpack and install the JDK and Hadoop, and set the environment variables
2. Configure passwordless SSH login
ssh-keygen -t rsa
ssh-copy-id root@localhost
ssh localhost
ssh 0.0.0.0
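For the full cluster, copy the public key to every node as well (a sketch assuming the cdh01–cdh03 hostnames from /etc/hosts):
for h in cdh01 cdh02 cdh03; do ssh-copy-id root@$h; done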
3. Configuration files
3.1 hadoop-env.sh
# The java implementation to use.
export JAVA_HOME=/opt/apps/jdk1.8.0_261
3.2 hdfs-site.xml
<property>
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/opt/apps/hadoop-2.8.1/hdpdata/hdfs/name</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/opt/apps/hadoop-2.8.1/hdpdata/hdfs/data</value>
</property>
3.3 core-site.xml
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://zhuguofu:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/opt/apps/hadoop-2.8.1/hdpdata</value>
</property>
3.4 yarn-site.xml
<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>zhuguofu</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
  <name>yarn.scheduler.fair.preemption</name>
  <value>true</value>
</property>
<property>
  <name>yarn.scheduler.fair.preemption.cluster-utilization-threshold</name>
  <value>1.0</value>
</property>
3.5 mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>zhuguofu:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>zhuguofu:19888</value>
</property>
3.6 Create the data directories
mkdir -p /opt/apps/hadoop-2.8.1/hdpdata/hdfs/name
mkdir -p /opt/apps/hadoop-2.8.1/hdpdata/hdfs/data
3.7 Format the NameNode
hdfs namenode -format
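After formatting, start HDFS and YARN with the standard Hadoop 2.x scripts from $HADOOP_HOME/sbin and verify the daemons:
start-dfs.sh
start-yarn.sh
jps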
tar -zxvf hadoop-2.6.0-cdh5.7.6.tar.gz -C /opt/apps
tar -zxvf jdk-8u45-linux-x64.tar.gz -C /opt/apps
Configure the environment variables
vi /etc/profile
source /etc/profile
Check the version
java -version
vi /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop/hadoop-env.sh
The main line to change is the following:
# The java implementation to use.
export JAVA_HOME=/opt/apps/jdk1.8.0_45
hadoop-env.sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use.
export JAVA_HOME=/opt/apps/jdk1.8.0_45
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
if [ "$HADOOP_CLASSPATH" ]; then
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
else
export HADOOP_CLASSPATH=$f
fi
done
# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
# Extra Java runtime options. Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
# Where log files are stored. $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
# the user that will run the hadoop daemons. Otherwise there is the
# potential for a symlink attack.
export HADOOP_PID_DIR=/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata
export HADOOP_SECURE_DN_PID_DIR=/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata
# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
vi /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop/hdfs-site.xml
<property>
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata/hdfs/name</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata/hdfs/data</value>
</property>
<property>
  <name>dfs.namenode.checkpoint.dir</name>
  <value>/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata/hdfs/sname</value>
</property>
<property>
  <name>dfs.namenode.checkpoint.edits.dir</name>
  <value>/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata/hdfs/sname</value>
</property>
<property>
  <name>dfs.permissions.enabled</name>
  <value>false</value>
</property>
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
</property>
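With dfs.webhdfs.enabled set to true, the REST interface can be smoke-tested once HDFS is up (assuming the default Hadoop 2.x NameNode HTTP port 50070):
curl -s "http://cdh01:50070/webhdfs/v1/?op=LISTSTATUS"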
vi /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop/core-site.xml
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://cdh01:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/opt/apps/hadoop-2.6.0-cdh5.7.6/hdpdata</value>
</property>
<property>
  <name>hadoop.proxyuser.hue.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.hue.groups</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.root.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.root.groups</name>
  <value>*</value>
</property>
mv mapred-site.xml.template mapred-site.xml
vi /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop/mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>cdh01:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>cdh01:19888</value>
</property>
vi /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop/yarn-site.xml
<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>cdh01</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
  <name>yarn.scheduler.fair.preemption</name>
  <value>true</value>
</property>
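Once the files are edited on cdh01, push the whole config directory to the other nodes (a sketch assuming the same install path on every node):
for h in cdh02 cdh03; do
    scp -r /opt/apps/hadoop-2.6.0-cdh5.7.6/etc/hadoop root@$h:/opt/apps/hadoop-2.6.0-cdh5.7.6/etc/
done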