HDFS - jstack monitoring script

monitor_nn_failover.sh

Purpose: the service went down, and the logs gave no indication of what caused it. To analyze further, we need the jstack output of the process from around the time of the failure, so the script keeps collecting jstack snapshots until the ZKFC log shows that a failover was triggered.
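The stop condition in the script below keys on the timestamp prefix of the matching ZKFC log line. A minimal sketch of that parsing, assuming the standard Hadoop log4j timestamp format (date and time, then a comma and milliseconds); the sample line here is made up:

line='2023-05-11 09:30:12,345 WARN org.apache.hadoop.ha.HealthMonitor: ... 45000 millis timeout ...'
ts=${line%%,*}              # strip everything from the first comma onward -> "2023-05-11 09:30:12"
epoch=$(date -d "$ts" +%s)  # epoch seconds, comparable with the script's START_TIME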

#!/bin/bash
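# Take a jstack snapshot of the NameNode every SLEEP_INTERVAL seconds,
# retaining at most MAX_NR snapshots as a rolling window, until the local ZKFC
# log records a "45000 millis timeout" newer than the script's start time
# (i.e. a failover was triggered), then bundle the snapshots into a tarball.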
LOG_DIR=/var/log/hdfs1
PREFIX=nn-jstack-
MAX_NR=20
SLEEP_INTERVAL=10
KUBECTL='kubectl -s https://127.0.0.1:6443 --certificate-authority=/srv/kubernetes/ca.pem --client-certificate=/srv/kubernetes/admin.pem --client-key=/srv/kubernetes/admin-key.pem'
hostname=$(hostname)
pod=$($KUBECTL get pods -o wide | grep ${hostname} | awk '/hdfs-namenode/ {print $1}')
pid_nn=$($KUBECTL exec $pod -c hadoop-hdfs-namenode-hdfs1 -- jps | awk '/NameNode/ {print $1}')
echo "Namenode pod is $pod and namenode pid is $pid_nn"
START_TIME=$(date +%s)
echo "=== Start at: $START_TIME"
while :
do
  # check if timeout happened
  last_timeout=$(grep "45000 millis timeout" $LOG_DIR/hadoop-hdfs-zkfc-${hostname}.log | tail -n 1)
  if [ -n "$last_timeout" ]; then
    last_time=${last_timeout%%,*}
    last_time=$(date -d "$last_time" +%s)
    if [ $last_time -gt $START_TIME ]; then
      break
    fi
  fi
  # collect jstack
  cur_time=$(date +%F-%T)
  echo $cur_time
  $KUBECTL exec $pod -c hadoop-hdfs-namenode-hdfs1 -- sudo -u hdfs jstack $pid_nn >$LOG_DIR/${PREFIX}${cur_time}.log 2>&1
  # remove oldest one
  nr=$(ls $LOG_DIR/${PREFIX}* | wc -l)
  if [ $nr -gt $MAX_NR ]; then
    oldest=$(ls -1tr $LOG_DIR/${PREFIX}* | head -n 1 )
    rm -f $oldest
  fi
  sleep $SLEEP_INTERVAL
done
pkg=${PREFIX}$(hostname)-$(date +%s).tgz
tar zcf $pkg $LOG_DIR/${PREFIX}*
rm -f $LOG_DIR/${PREFIX}*
echo "=== Done: $pkg"
