hive自动化统计标签表非零值

一键解决大标签表统计sql的编写与结果格式的调整

统计SQL生成脚本
# Target Hive table name taken from the first CLI argument.
# NOTE(review): assigned here but not referenced anywhere in the visible
# code — the functions below each read their own $1; confirm whether this
# variable is dead or used by a part of the script outside this view.
table_name=$1

concat_data() {

   file_name=`file_name_txt $1`
   
   first_field=`echo ${file_name}|awk '{print $1}'|sed 's/ //g'`
   
   for i in ${file_name}
   do
   is_field=$i
   
   nf_field=`echo ${is_field} |awk -F',' '{print NF-1}'`

   if [[ $is_field != ${first_field} ]];then
     a=",sum(case when $i > '0' then 1 else 0 end ) as $i""_true"
     c=",sum(case when $i <= '0' then 1 else 0 end ) as $i""_false"
     echo $a
     echo $c
   elif [[ ${i} = ${first_field} ]];then
    echo "sum(case when $i > '0' then 1 else 0 end ) as $i""_all"
   else
     b=",${i}"
     echo $b
   fi 
done
}


#######################################
# Print the column names of a Hive table, one per line.
# Arguments: $1 - table name
# Outputs:   first token of each "desc <table>" line, stopping at the
#            first line beginning with '#' (Hive's "# Partition
#            Information" section separator).
#######################################
file_name_txt(){

   # First whitespace-delimited token of each "desc" line is the column name.
   hive_data=$(hcat -e "desc $1" | awk '{print $1}')

   for i in ${hive_data}
   do
      if [[ "$i" == '#' ]]; then
         # Stop at the partition-info marker. The original used 'exit 0',
         # which kills the whole script if this function is ever called
         # outside a command-substitution subshell; 'break' is safe either way.
         break
      fi
      echo "$i"
   done
}
  • 生成可执行SQL文件 sh test_cnt.sh "表名" > label_cnt.sql
spark 运行 SQL 文件的配置（通过 hive.cli.print.header=true 在结果中输出表头行）
    # Run the SQL file given as $1 via spark-sql and print results with a
    # header row (hive.cli.print.header=true, unqualified column names).
    # Fix: the broadcast-join threshold key was misspelled
    # "autoBroadcastJionThreshold", which Spark silently ignores; the
    # correct key is spark.sql.autoBroadcastJoinThreshold.
    /usr/local/spark-2.4.3-bin-hadoop2.7/bin/spark-sql --driver-memory 8g \
    --executor-memory 8g \
    --executor-cores 3 \
    --conf spark.yarn.executor.memoryOverhead=6g \
    --conf spark.driver.memoryOverhead=2g \
    --conf spark.sql.autoBroadcastJoinThreshold=500485760 \
    --conf spark.network.timeout=800000 \
    --conf spark.driver.maxResultSize=4g \
    --conf spark.rpc.message.maxSize=500 \
    --conf spark.rpc.askTimeout=600 \
    --conf spark.executor.heartbeatInterval=60000 \
    --conf spark.dynamicAllocation.enabled=true \
    --conf spark.shuffle.service.enabled=true \
    --conf spark.dynamicAllocation.minExecutors=2 \
    --conf spark.dynamicAllocation.maxExecutors=150 \
    --conf spark.dynamicAllocation.executorIdleTimeout=100s \
    --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=300s \
    --conf spark.scheduler.mode=FAIR \
    --conf spark.dynamicAllocation.schedulerBacklogTimeout=2s \
    --conf spark.default.parallelism=400 \
    --conf spark.sql.shuffle.partitions=400 \
    --conf spark.sql.broadcastTimeout=1800 \
    --conf spark.maxRemoteBlockSizeFetchToMem=512m \
    --hiveconf hive.cli.print.header=true \
    --hiveconf hive.resultset.use.unique.column.names=false \
    --name "标签统计" \
    --queue "root.users.gs.gs" \
    -f "$1"

统计结果文件生成 sh spark_f label_cnt.sql > res_cnt_mid.txt

运行结果行转列
# Transpose rows to columns: for each column index of the input file ($1),
# print that column's values joined onto a single line (via xargs).
# Quoting "$1" fixes a hang in the original: unquoted `cat $1` with an
# empty argument falls back to reading stdin and blocks forever.
line=$(awk '{print NF; exit}' "$1")
for n in $(seq 1 "${line}")
do
   # -v passes the shell loop index into awk safely, replacing the
   # original's fragile '''$n''' quote-splicing interpolation.
   awk -v col="$n" '{print $col}' "$1" | xargs
done

生成最终结果 sh hang_to_lie.sh res_cnt_mid.txt > output_cnt.txt

你可能感兴趣的:(hive自动化统计标签表非零值)