Ambari API 将数据采集到 MySQL

前提

  • 主机上需要安装 jq 才能正常解析 json
  • Ambari API服务可用
  • Ambari 版本 2.7.3

 

脚本

如下:

#!/bin/bash
# Collect Ambari cluster metrics via the REST API and load them into MySQL.

# MySQL connection settings for the target metrics database.
db_host="localhost"
db_user="root"
db_password="root"

# Ambari API credentials and endpoint layout.
username="root"
password="root"
url_base_path="http://localhost:8080/api/v1/clusters/clustername/"
file_base_path="/data/zpf/ops/json/"
##########################################################################
# Fetch each component's JSON straight from the Ambari REST API.
##########################################################################
# BUG FIX: the original assigned `nn_json=`echo cat $nn_path`` etc., which
# merely echoes the literal string "cat <path>" instead of executing cat —
# and the *_path variables were commented out anyway, so every *_json
# variable (except cl_json) held junk. Fetch directly from the API instead.

# fetch SUFFIX — print the JSON body of the API resource at $url_base_path$SUFFIX.
fetch() {
    curl -u "$username:$password" -sS -G "${url_base_path}$1"
}

cl_json=$(fetch "")
nn_json=$(fetch "services/HDFS/components/NAMENODE")
dn_json=$(fetch "services/HDFS/components/DATANODE")
hm_json=$(fetch "services/HBASE/components/HBASE_MASTER")
hr_json=$(fetch "services/HBASE/components/HBASE_REGIONSERVER")
hs_json=$(fetch "services/HIVE/components/HIVE_SERVER")
hmeta_json=$(fetch "services/HIVE/components/HIVE_METASTORE")
zk_json=$(fetch "services/ZOOKEEPER/components/ZOOKEEPER_SERVER")
rm_json=$(fetch "services/YARN/components/RESOURCEMANAGER")
nm_json=$(fetch "services/YARN/components/NODEMANAGER")
##########################################################################
# HDFS capacity usage
##########################################################################
# BUG FIX: the original wrote `$nn_json| jq ...`, which tries to *execute*
# the JSON text as a shell command; the value must be piped into jq.
disk_total=$(echo "$nn_json" | jq '.metrics.dfs.FSNamesystem.CapacityTotal')
disk_used=$(echo "$nn_json" | jq '.metrics.dfs.FSNamesystem.CapacityUsed')
disk_used_rate=$(echo "$nn_json" | jq '.ServiceComponentInfo.PercentUsed')
NonDfsUsedSpace=$(echo "$nn_json" | jq '.ServiceComponentInfo.NonDfsUsedSpace')

# Non-DFS usage as a percentage of total capacity.
# BUG FIX: the original divided $NonDfsUsedSpace by $CapacityTotal, neither
# of which was ever assigned (both were commented out), so bc errored out.
NonDFSUsed=$(echo "scale=5;${NonDfsUsedSpace}/${disk_total}*100" | bc)
##########################################################################
# Cluster host health
##########################################################################
#AMBARI
TotalHosts=$(echo "$cl_json" | jq '.Clusters.total_hosts')
# NOTE(review): this reads the UNHEALTHY counter despite the variable name — confirm intent.
HealthyHosts=$(echo "$cl_json" | jq '.Clusters.health_report."Host/host_status/UNHEALTHY"')
ALERT=$(echo "$cl_json" | jq '.Clusters.health_report."Host/host_status/ALERT"')
#ZOOKEEPER
zk_total=$(echo "$zk_json" | jq '.ServiceComponentInfo.total_count')
zk_alived=$(echo "$zk_json" | jq '.ServiceComponentInfo.started_count')
#HDFS
NN_TOTAL=$(echo "$nn_json" | jq '.ServiceComponentInfo.total_count')
nn_alived=$(echo "$nn_json" | jq '.ServiceComponentInfo.started_count')
nn_heapsize_total=$(echo "$nn_json" | jq '.ServiceComponentInfo.HeapMemoryMax')
# (fixed local misspelling "uesd" -> "used"; only referenced on the next line)
nn_heapsize_used=$(echo "$nn_json" | jq '.ServiceComponentInfo.HeapMemoryUsed')
nn_heapsize_rate=$(echo "scale=2; $nn_heapsize_used/$nn_heapsize_total" | bc)
DN_TOTAL=$(echo "$dn_json" | jq '.ServiceComponentInfo.total_count')
dn_alived=$(echo "$dn_json" | jq '.ServiceComponentInfo.started_count')
block_total=$(echo "$dn_json" | jq '.metrics.dfs.FSNamesystem.BlocksTotal')
block_missing=$(echo "$dn_json" | jq '.metrics.dfs.FSNamesystem.MissingBlocks')
file_num=$(echo "$dn_json" | jq '.metrics.dfs.FSNamesystem.FilesTotal')
#YARN
# BUG FIX: pipe the JSON into jq instead of executing it as a command.
rm_total=$(echo "$rm_json" | jq '.ServiceComponentInfo.total_count')
rm_alived=$(echo "$rm_json" | jq '.ServiceComponentInfo.started_count')
rm_heapsize_total=$(echo "$rm_json" | jq '.metrics.jvm.HeapMemoryMax')
rm_heapsize_used=$(echo "$rm_json" | jq '.metrics.jvm.HeapMemoryUsed')
# BUG FIX: the original read $rm_heapsize_uesd (typo, never assigned),
# so the heap-usage rate was always empty.
rm_heapsize_used_rate=$(echo "scale=2; $rm_heapsize_used/$rm_heapsize_total" | bc)
nm_total=$(echo "$nm_json" | jq '.ServiceComponentInfo.total_count')
nm_alived=$(echo "$nm_json" | jq '.ServiceComponentInfo.started_count')
#HIVE
h2_total=$(echo "$hs_json" | jq '.ServiceComponentInfo.total_count')
h2_alived=$(echo "$hs_json" | jq '.ServiceComponentInfo.started_count')
hmeta_total=$(echo "$hmeta_json" | jq '.ServiceComponentInfo.total_count')
hmeta_alived=$(echo "$hmeta_json" | jq '.ServiceComponentInfo.started_count')
# HiveServer2 host names: `jq -r` emits raw strings, replacing the original
# three-stage jq | jq | awk quote-stripping pipeline.
HIVE_COUNT_NN1=$(echo "$hs_json" | jq -r '.host_components[].HostRoles.host_name' | grep nn1)
HIVE_COUNT_NN2=$(echo "$hs_json" | jq -r '.host_components[].HostRoles.host_name' | grep nn2)
#HBASE
hm_total=$(echo "$hm_json" | jq '.ServiceComponentInfo.total_count')
hm_alived=$(echo "$hm_json" | jq '.ServiceComponentInfo.started_count')
hm_heapsize_total=$(echo "$hm_json" | jq '.metrics.jvm.memMaxM')
# Heap-used metric not exposed for HMaster here; hard-coded placeholders.
hm_heapsize_used=0
hm_heapsize_rate=0
hrs_total=$(echo "$hr_json" | jq '.ServiceComponentInfo.total_count')
hrs_alived=$(echo "$hr_json" | jq '.ServiceComponentInfo.started_count')
# AverageLoad lives on the HBASE_MASTER resource, hence hm_json here.
hrs_load=$(echo "$hm_json" | jq '.ServiceComponentInfo.AverageLoad')
REGIONSERVER=$(echo "$hr_json" | jq '.ServiceComponentInfo.started_count')
RS_TOTAL=$(echo "$hr_json" | jq '.ServiceComponentInfo.total_count')

# Build the INSERT for the ambari_simple summary table.
# BUG FIX: the hm_total column was fed '$hm_alived' twice; it now receives
# '$hm_total'. NOTE(review): values are interpolated straight into the SQL
# string — safe only because they come from jq-parsed numeric API fields.
sql="insert into ambari_simple (
cluster_id, cluster_name, host_num, zk_total, zk_alived, disk_total, disk_used, disk_used_rate, nn_total, nn_alived, nn_heapsize_rate, dn_total
, dn_alived, block_total, block_missing, dir_num, file_num, small_file_num, rm_total, rm_alived, rm_heapsize_rate, nm_total
, nm_alived, h2_total, h2_alived, hmeta_total, hmeta_alived, hm_total, hm_alived, hm_heapsize_rate, hrs_total, hrs_alived
, hrs_load, hrg_total, hrg_trans
) values (
'0', '0', '$TotalHosts', '$zk_total', '$zk_alived', '$disk_total', '$disk_used', '$disk_used_rate', '$NN_TOTAL', '$nn_alived', '$nn_heapsize_rate', '$DN_TOTAL'
, '$dn_alived', '$block_total', '$block_missing', '0', '$file_num', '0', '$rm_total', '$rm_alived', '$rm_heapsize_rate', '$nm_total'
, '$nm_alived', '$h2_total', '$h2_alived', '$hmeta_total', '$hmeta_alived', '$hm_total', '$hm_alived', '0', '$hrs_total', '$hrs_alived'
, '$hrs_load', '0', '0')"

#mysql -h"$db_host" -u"$db_user" -p"$db_password" -e "$sql"
# Quote $sql so the statement's internal whitespace/newlines are preserved.
echo "$sql"

#echo $dfs_used
#echo $DFSRemaing
#echo $NonDFSUsed
#echo $NonDfsUsedSpace
#echo $CapacityTotal
#echo $TotalUsed
#echo "========"
#echo $TotalHosts
#echo $HealthyHosts
#echo $UNHealthy
#echo $ALERT
#echo $REGIONSERVER
#echo $RS_TOTAL
#echo $HMASTER
#echo $HM_TOTAL
#echo $DATANODE
#echo $DN_TOTAL
#echo $NAMENODE
#echo $NN_TOTAL
#echo $HIVE_COUNT
#echo $HIVE_COUNT_NN1
#echo $HIVE_COUNT_NN2

 

你可能感兴趣的:(Ambari)