git clone https://github.com/coreos/kube-prometheus.git
Because some of the required images are blocked in mainland China, they have to be pulled manually from a mirror such as Aliyun.
The image below is needed by the kube-state-metrics deployment; download it first:
sudo docker pull mirrorgooglecontainers/addon-resizer:1.8.4
# The following output indicates the image was downloaded successfully
1.8.4: Pulling from mirrorgooglecontainers/addon-resizer
Digest: sha256:737c634a95f04bffbcd8d3a833745cc26d4d42e8a759ca75b8b8b407ef40b780
Status: Image is up to date for mirrorgooglecontainers/addon-resizer:1.8.4
docker.io/mirrorgooglecontainers/addon-resizer:1.8.4
# Then re-tag the image to the repository:tag that the deployment YAML expects
sudo docker tag mirrorgooglecontainers/addon-resizer:1.8.4 k8s.gcr.io/addon-resizer:1.8.4
# Pull the prometheus image from Aliyun
sudo docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/prometheus
# The following output indicates the image was pulled successfully
Using default tag: latest
latest: Pulling from google_containers/prometheus
Digest: sha256:a36643ecff25461626eb259e11309dbbff400839a0a90e2dea19c34267abd0a3
Status: Image is up to date for registry.cn-hangzhou.aliyuncs.com/google_containers/prometheus:latest
registry.cn-hangzhou.aliyuncs.com/google_containers/prometheus:latest
# Re-tag the image
sudo docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/prometheus quay.io/prometheus/prometheus
The remaining images can be downloaded normally during deployment. If you run into an image that cannot be pulled in your own deployment, find a mirror registry yourself, pull the image from there, and re-tag it in the same way, as in the sketch below.
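For example, the same pull-and-retag pattern would apply to any other blocked image. A minimal sketch, assuming a hypothetical mirror copy of kube-rbac-proxy at the path shown (mirror path and tag are assumptions, not verified):
# hypothetical mirror path and tag; substitute whatever mirror you actually find
sudo docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-rbac-proxy:v0.4.1
# re-tag to the name referenced in the kube-prometheus manifests
sudo docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-rbac-proxy:v0.4.1 quay.io/coreos/kube-rbac-proxy:v0.4.1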
Go into the cloned project and edit the YAML file so that Grafana can be reached from outside the cluster via a NodePort:
cd ~/kube-prometheus/manifests
vim grafana-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort  # add the NodePort type
  ports:
  - name: http
    nodePort: 30006  # manually set the access port to 30006
    port: 3000
    targetPort: http
  selector:
    app: grafana
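Once the manifests have been applied (see the steps further down), you can confirm the NodePort is in effect and reach Grafana from outside the cluster; the node IP below is simply one of the nodes in this deployment:
kubectl -n monitoring get svc grafana
# the PORT(S) column should show 3000:30006/TCP
# then open http://<any-node-ip>:30006 in a browser, e.g. http://192.168.30.50:30006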
Edit prometheus-service.yaml in the same way so that Prometheus can be reached from outside the cluster via a NodePort:
vim prometheus-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    prometheus: k8s
  name: prometheus-k8s
  namespace: monitoring
spec:
  type: NodePort  # add the NodePort type
  ports:
  - name: web
    port: 9090
    nodePort: 30007  # manually set the access port to 30007
    targetPort: web
  selector:
    app: prometheus
    prometheus: k8s
  sessionAffinity: ClientIP
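The same check applies to Prometheus after deployment:
kubectl -n monitoring get svc prometheus-k8s
# the PORT(S) column should show 9090:30007/TCP
# the Prometheus UI is then reachable at http://<any-node-ip>:30007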
cd ~/kube-prometheus/manifests
ls
00namespace-namespace.yaml node-exporter-clusterRole.yaml
0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml node-exporter-daemonset.yaml
0prometheus-operator-0prometheusCustomResourceDefinition.yaml node-exporter-serviceAccount.yaml
0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml node-exporter-serviceMonitor.yaml
0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml node-exporter-service.yaml
0prometheus-operator-clusterRoleBinding.yaml prometheus-adapter-apiService.yaml
0prometheus-operator-clusterRole.yaml prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
0prometheus-operator-deployment.yaml prometheus-adapter-clusterRoleBindingDelegator.yaml
0prometheus-operator-serviceAccount.yaml prometheus-adapter-clusterRoleBinding.yaml
0prometheus-operator-serviceMonitor.yaml prometheus-adapter-clusterRoleServerResources.yaml
0prometheus-operator-service.yaml prometheus-adapter-clusterRole.yaml
alertmanager-alertmanager.yaml prometheus-adapter-configMap.yaml
alertmanager-secret.yaml prometheus-adapter-deployment.yaml
alertmanager-serviceAccount.yaml prometheus-adapter-roleBindingAuthReader.yaml
alertmanager-serviceMonitor.yaml prometheus-adapter-serviceAccount.yaml
alertmanager-service.yaml prometheus-adapter-service.yaml
grafana-dashboardDatasources.yaml prometheus-clusterRoleBinding.yaml
grafana-dashboardDefinitions.yaml prometheus-clusterRole.yaml
grafana-dashboardSources.yaml prometheus-prometheus.yaml
grafana-deployment.yaml prometheus-roleBindingConfig.yaml
grafana-serviceAccount.yaml prometheus-roleBindingSpecificNamespaces.yaml
grafana-serviceMonitor.yaml prometheus-roleConfig.yaml
grafana-service.yaml prometheus-roleSpecificNamespaces.yaml
kube-state-metrics-clusterRoleBinding.yaml prometheus-rules.yaml
kube-state-metrics-clusterRole.yaml prometheus-serviceAccount.yaml
kube-state-metrics-deployment.yaml prometheus-serviceMonitorApiserver.yaml
kube-state-metrics-roleBinding.yaml prometheus-serviceMonitorCoreDNS.yaml
kube-state-metrics-role.yaml prometheus-serviceMonitorKubeControllerManager.yaml
kube-state-metrics-serviceAccount.yaml prometheus-serviceMonitorKubelet.yaml
kube-state-metrics-serviceMonitor.yaml prometheus-serviceMonitorKubeScheduler.yaml
kube-state-metrics-service.yaml prometheus-serviceMonitor.yaml
node-exporter-clusterRoleBinding.yaml prometheus-service.yaml
cd ~/kube-prometheus
kubectl create -f manifests/ || true
# The resources have ordering dependencies: objects created earlier may rely on CRDs that are only installed later, so the apply command below may need to be run twice
kubectl apply -f manifests/ 2>/dev/null || true
# Run the command again
kubectl apply -f manifests/ 2>/dev/null || true
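Instead of blindly re-running apply, you can also wait until the operator's CRDs have been registered and then apply once more; the CRD names below are inferred from the CustomResourceDefinition manifests listed earlier, so adjust them if your version differs:
kubectl wait --for=condition=Established --timeout=120s \
  crd/prometheuses.monitoring.coreos.com \
  crd/servicemonitors.monitoring.coreos.com \
  crd/alertmanagers.monitoring.coreos.com \
  crd/prometheusrules.monitoring.coreos.com
kubectl apply -f manifests/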
# Check the deployed pods
kubectl get pod -n monitoring -o wide
# Pod placement on the nodes (columns: NAME  READY  STATUS  RESTARTS  AGE  IP  NODE)
alertmanager-main-0 2/2 Running 0 17h 10.244.1.29 hp-55
alertmanager-main-1 2/2 Running 0 17h 10.244.4.26 hp-52
alertmanager-main-2 2/2 Running 0 17h 10.244.3.30 hp-51
grafana-57bfdd47f8-sxxln 1/1 Running 0 17h 10.244.2.34 hp-50
kube-state-metrics-59c47cfcfb-nfc8c 4/4 Running 0 14h 10.244.4.28 hp-52
node-exporter-mn9xf 2/2 Running 0 17h 192.168.30.55 hp-55
node-exporter-nqk7k 2/2 Running 0 17h 192.168.30.51 hp-51
node-exporter-pxkgf 2/2 Running 0 17h 192.168.30.54 hp-54
node-exporter-xv9nh 2/2 Running 0 17h 192.168.30.50 hp-50
node-exporter-zwpw6 2/2 Running 0 17h 192.168.30.52 hp-52
prometheus-adapter-668748ddbd-7w2j7 1/1 Running 0 17h 10.244.1.28 hp-55
prometheus-k8s-0 3/3 Running 1 17h 10.244.2.35 hp-50
prometheus-k8s-1 3/3 Running 6 17h 10.244.3.31 hp-51
prometheus-operator-55b978b89-8jz2l 1/1 Running 0 17h 10.244.4.25 hp-52
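If you would rather block until everything is up instead of polling get pod by hand, a rough one-liner works (the timeout value is arbitrary):
kubectl -n monitoring wait --for=condition=Ready pod --all --timeout=300s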
In your manifests directory, create the following three YAML files, pasting each manifest between the heredoc markers (a hedged sketch of possible contents follows below).
cat > prometheus-pushgatewayServiceMonitor.yaml <<EOF
...
EOF
cat > prometheus-pushgatewayService.yaml <<EOF
...
EOF
cat > prometheus-pushgatewayDeployment.yaml <<EOF
...
EOF
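A minimal sketch of what the three files could contain, assuming a plain prom/pushgateway Deployment exposed on port 9091 and scraped through a ServiceMonitor; the image tag, labels, and port name are assumptions, not the original manifests:
# prometheus-pushgatewayDeployment.yaml (sketch)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pushgateway
  namespace: monitoring
  labels:
    app: pushgateway
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pushgateway
  template:
    metadata:
      labels:
        app: pushgateway
    spec:
      containers:
      - name: pushgateway
        image: prom/pushgateway:v1.0.0   # assumed tag
        ports:
        - name: http
          containerPort: 9091

# prometheus-pushgatewayService.yaml (sketch)
apiVersion: v1
kind: Service
metadata:
  name: pushgateway
  namespace: monitoring
  labels:
    app: pushgateway
spec:
  ports:
  - name: http
    port: 9091
    targetPort: http
  selector:
    app: pushgateway

# prometheus-pushgatewayServiceMonitor.yaml (sketch)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: pushgateway
  namespace: monitoring
  labels:
    app: pushgateway
spec:
  selector:
    matchLabels:
      app: pushgateway
  namespaceSelector:
    matchNames:
    - monitoring
  endpoints:
  - port: http
    interval: 30s
    honorLabels: true   # keep the job/instance labels pushed by clients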
Then apply them:
kubectl apply -f .
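To confirm the new objects were created and that Prometheus picks the pushgateway up as a scrape target (labels and names assume the sketch above):
kubectl -n monitoring get deployment,service,servicemonitor -l app=pushgateway
# then check the Targets page of the Prometheus UI, e.g. http://192.168.30.50:30007/targets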