First, log in to 238 and run:
cd /usr/local/hbase-1.2.1/bin/
./stop-hbase.sh
Software used: FileZilla, SecureCRT.exe
If you hit errors, go to 238, 239, and 240 and run:
cd /usr/local
./stophbase.sh
After that finishes, run
ps aux | grep hbase
to check whether any HBase processes are left over. If there are, take the PID you see and run
kill -9 $pid    (where $pid is the PID number you saw)
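If several leftover processes show up, they can be cleaned up in one pass; a minimal sketch (the '[h]base' pattern keeps grep from matching its own process):
ps aux | grep '[h]base' | awk '{print $2}' | xargs -r kill -9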
Then run
zkServer.sh restart
hadoop dfsadmin -safemode get    (check whether Hadoop safe mode is on)
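If safe mode reports ON and does not clear on its own, it can be left manually before starting HBase (only do this once the NameNode is otherwise healthy):
hadoop dfsadmin -safemode leave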
./restartzookeeper.sh    (run on all three nodes; restarts ZooKeeper)
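To confirm ZooKeeper is back, check the mode on each of the three nodes; one should report leader and the other two follower:
zkServer.sh status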
Then run
./starthbase.sh    (start HBase)
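To verify HBase actually came up, check that the HMaster / HRegionServer processes are present on the relevant nodes:
jps | grep -E 'HMaster|HRegionServer'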
hbase hbck -repair
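Before running -repair, it is usually worth running hbck in read-only mode first to see which inconsistencies it reports:
hbase hbck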
scan 'DW_KFB_NHTZ',{LIMIT=>5}
get 'DW_INVEST_CZTZ', 'null800000000000网贷天眼'
delete 'DM_ALL_INVEST','201703021001560000597559'
deleteall 'DM_ALL_INVEST','20170301'
put 'DM_YY_TJ','20170410','USERS:INVT_AMT','1415814.84'
put 't1','rowkey001','f1:col1','value01'
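The scan / get / delete / deleteall / put lines above are HBase shell commands; they can be run interactively after typing hbase shell, or piped in non-interactively, e.g. (table name taken from the examples above):
echo "scan 'DM_YY_TJ', {LIMIT=>5}" | hbase shell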
nohup /usr/hdp/current/spark2-client/bin/spark-submit --class "planJob" --master yarn --deploy-mode cluster /root/project/dcproject/BigData.jar > /root/project/dcproject/dc.log &
[root@mnode spark-2.0.0-bin-hadoop2.7]# ./bin/spark-submit --executor-memory 2g --class "planJob" --master local[4] myApp/BigData.jar
[root@mnode spark-2.0.0-bin-hadoop2.7]# ./bin/spark-submit --class "QdJob" --master local[4] myApp/BigData.jar
[/usr/local/kafka_2.11-0.10.0.0/bin]# kafka-topics.sh --create --zookeeper "192.168.100.110:2181" --topic "producer_test" --partitions 10 --replication-factor 1
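To confirm the topic exists and see its partition layout, list or describe it against the same ZooKeeper address:
kafka-topics.sh --list --zookeeper "192.168.100.110:2181"
kafka-topics.sh --describe --zookeeper "192.168.100.110:2181" --topic "producer_test"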
-server -XX:PermSize=128M -XX:MaxPermSize=256m
VM options for running the project in IDEA:
-server -XX:PermSize=512M -XX:MaxPermSize=512M -XX:MaxNewSize=512M -XX:-UseGCOverheadLimit
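Note that PermSize / MaxPermSize only exist up to Java 7; on Java 8 they are ignored and the metaspace flags apply instead, so a rough equivalent (assuming IDEA runs the project on a Java 8 JDK) would be:
-server -XX:MaxMetaspaceSize=512M -XX:MaxNewSize=512M -XX:-UseGCOverheadLimit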
Run the jar file on the cluster:
nohup /usr/hdp/current/spark2-client/bin/spark-submit --executor-memory 2g --class "planJob" --master yarn /root/project/dcproject/BigData.jar > /root/project/dcproject/dc.log &
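After submitting, progress can be followed through the redirected submit log and the YARN application list:
tail -f /root/project/dcproject/dc.log
yarn application -list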