Execution scripts for submitting Spark applications

Three scripts are shown below: start.sh launches the Writer job in the background, stop.sh stops it via its PID, and batch.sh submits the daily batch write job.

start.sh

#!/bin/bash
#########################################################################
# File Name: start.sh
# Author: dai
#########################################################################
source /home/hadoop/.bashrc
. /etc/profile
# Clear the old checkpoint directory on HDFS before restarting
hadoop fs -rm -r checkpoint/Data
RUN=/share/apps/spark-2.4.4/bin/spark-submit
BASE_HOME=/home/pro/jianjian01/StarRocks/DataWrite
PID_FILE=$BASE_HOME/run/$(basename $BASE_HOME).pid
# Pick up the application jar from sbin/ (assumes sbin/ holds exactly one file)
#EXEC=$(find $BASE_HOME/sbin -name "*.jar")
EXEC=$BASE_HOME/sbin/$(ls $BASE_HOME/sbin)

CONF=$BASE_HOME/conf/application.properties
LOGCONF=$BASE_HOME/conf/log4j.properties
LOGFILE=$BASE_HOME/logs/writer.out   # driver output is redirected here
echo $EXEC
cd $BASE_HOME/bin
# Alternative: submit to YARN instead: --master yarn --deploy-mode client --queue protest --num-executors 35
# --files ships kafka_client_jaas.conf into each executor's working directory,
# so the executor-side java.security.auth.login.config can use the bare file name.
nohup $RUN --master spark://190.176.35.102:7079 \
    --conf spark.cores.max=12 \
    --conf "spark.driver.extraJavaOptions=-Dlog4j.configuration=file:$LOGCONF -Djava.security.auth.login.config=$BASE_HOME/conf/kafka_client_jaas.conf" \
    --conf "spark.executor.extraJavaOptions=-Djava.security.auth.login.config=kafka_client_jaas.conf" \
    --files $BASE_HOME/conf/kafka_client_jaas.conf \
    --executor-memory 1g \
    --class com.sibat.police.Writer \
    $EXEC > $LOGFILE &
sleep 1   # give spark-submit a moment to start before looking up its PID
PID=$(ps -ef | grep "$EXEC" | grep -v grep | awk '{print $2}')
echo $BASE_HOME
echo $PID_FILE
echo $EXEC
echo $CONF
echo $PID
/bin/echo $PID > $PID_FILE
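The ps|grep lookup above can match more than one process if the jar path appears in another command line. A minimal alternative sketch (my suggestion, not part of the original deployment) captures the background job's PID directly with $!:

nohup $RUN --master spark://190.176.35.102:7079 --class com.sibat.police.Writer $EXEC > $LOGFILE &
PID=$!                      # PID of the spark-submit just backgrounded; no ps|grep race
echo $PID > $PID_FILE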

stop.sh

#!/bin/bash
#########################################################################
# File Name: stop.sh
# Author: dai
#########################################################################
. /etc/profile
BASE_HOME=/home/pro/jianjian01/StarRocks/DataWrite
PID_FILE=$BASE_HOME/run/$(basename $BASE_HOME).pid
EXEC=$BASE_HOME/sbin/$(ls $BASE_HOME/sbin)
PID=$(ps -ef | grep "$EXEC" | grep -v grep | awk '{print $2}')

/bin/kill -9 $PID && /bin/rm -f $PID_FILE

echo $BASE_HOME
echo $PID_FILE
echo $EXEC
echo $PID
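kill -9 gives the driver no chance to run shutdown hooks. A gentler stop, sketched below on the assumption that start.sh wrote the right PID into $PID_FILE, sends SIGTERM first and only escalates to SIGKILL:

PID=$(cat $PID_FILE)
kill $PID                                  # SIGTERM: let the driver shut down cleanly
for i in 1 2 3 4 5; do
    kill -0 $PID 2>/dev/null || break      # process already gone
    sleep 2
done
kill -0 $PID 2>/dev/null && kill -9 $PID   # still alive after ~10s, force it
rm -f $PID_FILE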

batch.sh

#!/bin/sh
. /etc/profile
. ~/.bash_profile
# Deployed on host 102 under /home/pro/jianjian01
Yesterday=$(date -d "-1 day" +%Y%m%d)   # yesterday's date, e.g. 20191231
time_start=$(date +%s)
/share/apps/spark-2.4.4/bin/spark-submit --master spark://190.176.35.102:7079 \
--conf spark.cores.max=71 \
--driver-memory 18g \
--class com.sibat.police.batch.Write \
/home/pro/jianjian01/StarRocks/DataWrite/sbin/datawrite-1.0-SNAPSHOT-jar-with-dependencies.jar \
$Yesterday 
if [ $? -eq 0 ]; then
    echo "$Yesterday data written successfully!"
else
    echo "$Yesterday data write failed!"
fi
time_end=$(date +%s)
time_take=$(((time_end - time_start)/60))
echo "Total elapsed time: $time_take minutes"
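
Because batch.sh always processes yesterday's data, it fits naturally under cron. A minimal sketch; the 02:30 schedule, script location, and log path are assumptions to adapt to the actual deployment:

# run the daily batch at 02:30 and append output to a log
30 2 * * * /home/pro/jianjian01/StarRocks/DataWrite/bin/batch.sh >> /home/pro/jianjian01/StarRocks/DataWrite/logs/batch.out 2>&1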
