大数据日常运维命令

1、HDFS NameNode

/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh start namenode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh stop namenode

bin/hdfs haadmin -getServiceState n1

2、HDFS DataNode

/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh start datanode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh stop datanode

bin/yarn rmadmin -getServiceState rm1   # 查看 ResourceManager 主备状态（对应第 3 节）

3、Yarn ResourceManager

/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start resourcemanager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop resourcemanager

4、Yarn NodeManager

/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start nodemanager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop nodemanager

5、Yarn WebAppProxyServer

/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop proxyserver
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start proxyserver

6、Hive

/usr/local/fqlhadoop/hive/bin/hive --service metastore -p 3316 &
ps -ef | grep 'org.apache.hadoop.hive.metastore.HiveMetaStore' | grep -v grep | awk '{print $2}' | xargs kill -9

7、Spark

spark-sql --master=yarn --queue lx_etl --driver-memory 1g --driver-java-options '-XX:MetaspaceSize=1g -XX:MaxMetaspaceSize=1g' --num-executors 1 --executor-memory 1g --executor-cores 1 --conf spark.yarn.am.memory=2048m --hiveconf hive.cli.print.header=false

Spark history 服务
/usr/local/fqlhadoop/spark/sbin/stop-history-server.sh

/usr/local/fqlhadoop/spark/sbin/start-history-server.sh
/usr/local/fqlhadoop/spark/sbin/start-history-server.sh hdfs://hacluster/sparklog

8、HBase

/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh stop master
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh start master

/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh stop regionserver
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh start regionserver

你可能感兴趣的:(Hadoop,大数据,运维)