$ cat /etc/apt/sources.list
deb http://mirrors.163.com/debian/ stretch main non-free contrib
deb http://mirrors.163.com/debian/ stretch-updates main non-free contrib
deb http://mirrors.163.com/debian/ stretch-backports main non-free contrib
deb-src http://mirrors.163.com/debian/ stretch main non-free contrib
deb-src http://mirrors.163.com/debian/ stretch-updates main non-free contrib
deb-src http://mirrors.163.com/debian/ stretch-backports main non-free contrib
deb http://mirrors.163.com/debian-security/ stretch/updates main non-free contrib
deb-src http://mirrors.163.com/debian-security/ stretch/updates main non-free contrib
deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
# deb-src [arch=amd64] https://download.docker.com/linux/debian stretch stable
# On the newer Ubuntu 18.04, docker-ce 17.03 cannot be installed from the default repository; use the source below instead
deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable
# deb-src [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable
$ cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
$ sysctl --system
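If the two net.bridge keys are reported as unknown by sysctl, the br_netfilter kernel module is probably not loaded yet; a minimal sketch of loading it (the modules-load file name here is just an example):
$ modprobe br_netfilter
# persist the module across reboots (example file name)
$ echo br_netfilter > /etc/modules-load.d/k8s.conf
$ sysctl --system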
One more point: Kubernetes v1.10.3 only supports Docker up to version 17.03, so follow the steps below
ref: https://v1-10.docs.kubernetes.io/docs/tasks/tools/install-kubeadm/#before-you-begin
$ apt-get update
$ apt-get install -y apt-transport-https ca-certificates curl software-properties-common
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
$ add-apt-repository "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") $(lsb_release -cs) stable"
$ apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 17.03 | head -1 | awk '{print $3}')
$ sudo apt-get remove docker docker-engine docker.io containerd runc
SET UP THE REPOSITORY
$ sudo apt-get update
$ sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
$ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
$ sudo apt-key fingerprint 0EBFCD88
pub 4096R/0EBFCD88 2017-02-22
Key fingerprint = 9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88
uid Docker Release (CE deb) <docker@docker.com>
sub 4096R/F273FCD8 2017-02-22
$ sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -cs) \
stable"
INSTALL DOCKER CE
$ sudo apt-get update
$ sudo apt-get install docker-ce docker-ce-cli containerd.io
$ apt-cache madison docker-ce
docker-ce | 5:18.09.1~3-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 5:18.09.0~3-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 18.06.1~ce~3-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 18.06.0~ce~3-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
...
# Actual output in this environment
$ sudo apt-cache madison docker-ce | grep 17.03
docker-ce | 17.03.3~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.2~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.1~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.0~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
b. Install a specific version using the version string from the second column, for example, 5:18.09.1~3-0~debian-stretch.
$ sudo apt-get install docker-ce=<VERSION_STRING> docker-ce-cli=<VERSION_STRING> containerd.io
# Versions actually installed here
$ sudo apt-get install docker-ce=17.03.3~ce-0~debian-stretch
$ sudo apt-get install docker-ce-cli=5:18.09.0~3-0~debian-stretch containerd.io
Verify that Docker CE is installed correctly by running the hello-world image.
$ sudo docker run hello-world
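Optionally (an extra step, not part of the original instructions), pin the Docker package so a routine apt-get upgrade does not replace the version Kubernetes expects:
$ sudo apt-mark hold docker-ce
# if the separate CLI package was installed as well:
$ sudo apt-mark hold docker-ce-cli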
ref: https://kubernetes.io/docs/setup/independent/install-kubeadm/
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
The official repository above may be unreachable from inside China, so install from the Aliyun mirror instead; see https://blog.csdn.net/nklinsirui/article/details/80581286#debian-ubuntu
Aliyun mirror index: https://opsx.alibaba.com/mirror?lang=en-US
# Kubernetes mirror for mainland China
## Add the key
$ curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
## Add the repository
$ cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
## Update the package index
$ apt-get update
## Find the matching versions
$ apt-cache madison kubectl | grep 1.10.3
kubectl | 1.10.3-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
$ apt-cache madison kubeadm | grep 1.10.3
kubeadm | 1.10.3-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
$ apt-cache madison kubelet | grep 1.10.3
kubelet | 1.10.3-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
# Install the matching versions (v1.10.3)
$ sudo apt-get install kubeadm=1.10.3-00 kubectl=1.10.3-00 kubelet=1.10.3-00
You might want to run 'apt --fix-broken install' to correct these.
The following packages have unmet dependencies:
kubelet : Depends: kubernetes-cni (= 0.6.0) but 0.7.5-00 is to be installed
E: Unmet dependencies. Try 'apt --fix-broken install' with no packages (or specify a solution).
You need to download the matching kubernetes-cni package from the Aliyun mirror first, install it, and then continue the installation, e.g.:
$ wget https://mirrors.aliyun.com/kubernetes/apt/pool/kubernetes-cni_0.6.0-00_amd64_43460dd3c97073851f84b32f5e8eebdc84fadedb5d5a00d1fc6872f30a4dd42c.deb
$ sudo dpkg -i kubernetes-cni_0.6.0-00_amd64_43460dd3c97073851f84b32f5e8eebdc84fadedb5d5a00d1fc6872f30a4dd42c.deb
$ sudo apt-get install kubeadm=1.10.3-00 kubectl=1.10.3-00 kubelet=1.10.3-00
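Optionally, mirror the apt-mark hold from the official instructions above so these pinned versions are not upgraded accidentally (kubernetes-cni included, since it was installed by hand):
$ sudo apt-mark hold kubelet kubeadm kubectl kubernetes-cni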
You can pull the images manually from the Aliyun registry and then run the initialization.
# Images needed on the master node
master_images=("etcd-amd64:3.1.12" \
"kube-apiserver-amd64:v1.10.3" \
"kube-controller-manager-amd64:v1.10.3" \
"kube-scheduler-amd64:v1.10.3" \
"pause-amd64:3.1" \
"kube-proxy-amd64:v1.10.3" \
"k8s-dns-kube-dns-amd64:1.14.8" \
"k8s-dns-dnsmasq-nanny-amd64:1.14.8" \
"k8s-dns-sidecar-amd64:1.14.8")
for img in ${master_images[@]};
do
image="registry.aliyuncs.com/google_containers/$img"
docker image pull ${image}
docker image tag ${image} k8s.gcr.io/$img
docker image rm ${image}
done
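A quick optional check (not in the original) that the re-tagged images are now present locally:
$ docker image ls | grep k8s.gcr.io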
# Worker nodes only need the kube-proxy and pause images
node_images=("pause-amd64:3.1" "kube-proxy-amd64:v1.10.3")
for img in ${node_images[@]};
do
image="registry.aliyuncs.com/google_containers/$img"
docker image pull ${image}
docker image tag ${image} k8s.gcr.io/$img
docker image rm ${image}
done
# Initialize; --apiserver-advertise-address is the current master node's IP
$ sudo kubeadm init \
    --apiserver-advertise-address=10.0.2.5 \
    --pod-network-cidr=172.16.81.0/20 \
    --kubernetes-version v1.10.3
# Open another terminal session to inspect initialization errors
$ sudo journalctl -xefu kubelet
$ export KUBECONFIG=/etc/kubernetes/admin.conf
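Alternatively, as kubeadm's own post-init output suggests, copy the admin kubeconfig into the current user's home so kubectl works without the environment variable:
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config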
# The coredns pods are Pending; they only become Running after a network plugin is installed
$ kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-8686dcc4fd-4ltcf 0/1 Pending 0 45m
coredns-8686dcc4fd-dvg5w 0/1 Pending 0 45m
etcd-k8s-master-testing 1/1 Running 1 44m 172.16.81.164 k8s-master-testing
kube-apiserver-k8s-master-testing 1/1 Running 1 44m 172.16.81.164 k8s-master-testing
kube-controller-manager-k8s-master-testing 1/1 Running 1 44m 172.16.81.164 k8s-master-testing
kube-proxy-mdx9s 1/1 Running 0 32m 172.16.81.165 k8s-node-01-testing
kube-proxy-vd9k4 1/1 Running 1 45m 172.16.81.164 k8s-master-testing
kube-scheduler-k8s-master-testing 1/1 Running 1 44m 172.16.81.164 k8s-master-testing
Install Weave Net
Official documentation: https://www.weave.works/docs/net/latest/kubernetes/kube-addon/#install
$ kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
Weave Net's default pod address range may conflict with the existing network and break pod communication, so change the default network first.
# Download the manifest locally first
$ curl -L "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" > weave.yml
# Update the network configuration in weave.yml (the relevant part of the DaemonSet spec):
spec:
  minReadySeconds: 5
  template:
    metadata:
      labels:
        name: weave-net
    spec:
      containers:
        - name: weave
          command:
            - /home/weave/launch.sh
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: IPALLOC_RANGE # IP allocation range
              value: 10.244.0.0/16 # this range does not have to match the --pod-network-cidr=172.16.81.0/20 used during kubeadm init
          image: 'docker.io/weaveworks/weave-kube:2.5.1'
          readinessProbe:
# Create the network plugin
$ kubectl apply -f weave.yml
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.extensions/weave-net created
Verify the network plugin installation
# All pods should now be in the Running state
$ kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-8686dcc4fd-4ltcf 1/1 Running 0 58m 10.244.0.2 k8s-node-01-testing
coredns-8686dcc4fd-dvg5w 1/1 Running 0 58m 10.244.128.1 k8s-master-testing
etcd-k8s-master-testing 1/1 Running 1 57m 172.16.81.164 k8s-master-testing
kube-apiserver-k8s-master-testing 1/1 Running 1 57m 172.16.81.164 k8s-master-testing
kube-controller-manager-k8s-master-testing 1/1 Running 1 57m 172.16.81.164 k8s-master-testing
kube-proxy-mdx9s 1/1 Running 0 44m 172.16.81.165 k8s-node-01-testing
kube-proxy-vd9k4 1/1 Running 1 58m 172.16.81.164 k8s-master-testing
kube-scheduler-k8s-master-testing 1/1 Running 1 57m 172.16.81.164 k8s-master-testing
weave-net-jcznp 2/2 Running 0 86s 172.16.81.165 k8s-node-01-testing
weave-net-skq7q 2/2 Running 0 86s 172.16.81.164 k8s-master-testing
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-testing Ready master 66m v1.14.1
# After initialization completes, the master prints a command starting with kubeadm join; run it on each worker node. The token is valid for 24 hours by default.
$ kubeadm join 172.16.81.164:6443 \
--token jx2pp7.o3tay3so8j56yfgc \
--discovery-token-ca-cert-hash \
sha256:5ced3bafc14c2a7fb6e152733dbf5608c0454488327e23a22a41153411cf6f27
What if the token has expired? See the official documentation:
ref: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#join-nodes
# List tokens on the master node
$ kubeadm token list
# If the token has expired, generate a new one:
$ kubeadm token create
# If you have lost the CA certificate hash, recover it with:
$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
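Recent kubeadm releases can also print a complete join command in one step (I have not verified this flag specifically on v1.10.3):
$ kubeadm token create --print-join-command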
Note: in production it is not recommended to run workloads on the master node; for a test cluster you can remove the master taint so pods can be scheduled there as well:
ref: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation
$ kubectl taint nodes --all node-role.kubernetes.io/master-
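To restore the default control-plane isolation later, the taint can be re-applied (the node name is a placeholder):
$ kubectl taint nodes <master-node-name> node-role.kubernetes.io/master=:NoSchedule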
The goal is to be able to manage the cluster from any node, i.e. run kubectl commands on non-master nodes as well.
Steps:
# Copy /etc/kubernetes/admin.conf from the master to the same directory on each worker node
root@dbk8s-master:~# scp /etc/kubernetes/admin.conf dbk8s-node-01:/etc/kubernetes/
admin.conf 100% 5453 3.1MB/s 00:00
root@dbk8s-master:~# scp /etc/kubernetes/admin.conf dbk8s-node-02:/etc/kubernetes/
admin.conf 100% 5453 4.5MB/s 00:00
$ echo "export KUBECONFIG='/etc/kubernetes/admin.conf'" >> ~/.bashrc
$ . ~/.bashrc
root@dbk8s-node-02:~# echo "export KUBECONFIG='/etc/kubernetes/admin.conf'" >> ~/.bashrc
root@dbk8s-node-02:~# . !$
. ~/.bashrc
root@dbk8s-node-02:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
dbk8s-master Ready master 1h v1.10.3
dbk8s-node-01 Ready <none> 59m v1.10.3
dbk8s-node-02 Ready <none> 59m v1.10.3
root@dbk8s-node-02:~# kubectl -n kube-system get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE
etcd-dbk8s-master 1/1 Running 3 1h 172.16.81.161 dbk8s-master
kube-apiserver-dbk8s-master 1/1 Running 3 1h 172.16.81.161 dbk8s-master
kube-controller-manager-dbk8s-master 1/1 Running 3 1h 172.16.81.161 dbk8s-master
kube-dns-86f4d74b45-9996g 3/3 Running 16 1h 192.168.50.129 dbk8s-master
kube-proxy-2bj4c 1/1 Running 0 59m 172.16.81.162 dbk8s-node-01
kube-proxy-bf7lf 1/1 Running 0 59m 172.16.81.163 dbk8s-node-02
kube-proxy-p7n5s 1/1 Running 4 1h 172.16.81.161 dbk8s-master
kube-scheduler-dbk8s-master 1/1 Running 4 1h 172.16.81.161 dbk8s-master
weave-net-4mwv6 2/2 Running 3 42m 172.16.81.161 dbk8s-master
weave-net-x8dwb 2/2 Running 0 42m 172.16.81.162 dbk8s-node-01
weave-net-xck97 2/2 Running 0 41m 172.16.81.163 dbk8s-node-02
ref:
The dashboard itself is just another service running on the cluster.
# Pull the image from the Aliyun registry
$ docker image pull registry.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
# Re-tag it with the k8s.gcr.io name
$ docker image tag registry.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1 k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
# Export the image and copy it to the other nodes
$ docker image save -o kubernetes-dashboard-amd64.tar.gz "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1"
$ scp kubernetes-dashboard-amd64.tar.gz dbk8s-node-01:/root
$ scp kubernetes-dashboard-amd64.tar.gz dbk8s-node-02:/root
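On each node the archive then still needs to be imported into Docker (a step implied but not shown above; the /root path matches the scp destination):
$ docker image load -i /root/kubernetes-dashboard-amd64.tar.gz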
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
$ vim kubernetes-dashboard.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort # this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001 # the fixed node port to expose
  selector:
    k8s-app: kubernetes-dashboard
$ kubectl apply -f kubernetes-dashboard.yaml
# Check which node kubernetes-dashboard was scheduled on, or import the image on every node
$ kubectl -n kube-system describe pods `kubectl -n kube-system get pods -o wide | grep dashboard | cut -d' ' -f1` | tail -1
Normal BackOff 8m (x7 over 12m) kubelet, dbk8s-node-02 Back-off pulling image "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1"
Access the dashboard over HTTPS rather than the default HTTP: https://172.16.81.161:30001/#!/login
$ kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount "dashboard-admin" created
$ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io "dashboard-admin" created
$ kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name: dashboard-admin-token-5m7q6
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name=dashboard-admin
kubernetes.io/service-account.uid=dbfb8cb7-794f-11e9-97c6-000c29e476c3
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1025 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNW03cTYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZGJmYjhjYjctNzk0Zi0xMWU5LTk3YzYtMDAwYzI5ZTQ3NmMzIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.XmDPqiLzXzWkFxLVntBIrMpAPD2VmbdKYUVo2VrMqdJekuhzcArjz_BTVSZll1NqCN9WYq_2tUA_TMRIyJS_SPeMi7mvwTz4L8upl0ll8qEBK1OPam3h_nTMRmbMWRb4KOKWBPN3_MbHA4XORYSsjKSJ57hcLxguaOUboa257VGF6WFr60BL8USsF3CQfkKY4mRnAIXQUoRi6SlgUSt7NoNalSVnxHN72vPtEjQvoRUcCaeWRZJLxrsNZIUrmdT40QPFYoA12DDGIxV43AQFY6TRFVfnYKowB_BDJ4An6CRurUE-a1yHl7heAWq9Mcw9tm6FYMCyybymSyucdgo0eg
With kubernetes-dashboard v1.10.1 installed on Kubernetes v1.10.3 as above, logging in produces the following warning.
Best solution: this is a fairly direct approach that solves the problem completely.
ref:
# Create a super-admin account
adm_account="k8s-dash-admin"
kubectl create serviceaccount ${adm_account} -n kube-system
kubectl create clusterrolebinding ${adm_account} --clusterrole=cluster-admin --serviceaccount=kube-system:${adm_account}
kubectl -n kube-system describe secrets $(kubectl -n kube-system get secret | grep ${adm_account} | awk '{print $1}')
Warning:
configmaps is forbidden: User “system:serviceaccount:kube-system:kubernetes-dashboard” cannot list configmaps in the namespace “default”
This is still a permissions problem; the fix below follows https://www.cnblogs.com/xulingjie/p/10101321.html
# Add a ServiceAccount, bind it, and make it usable for login
$ cat << EOF > k8s-ServiceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aks-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: aks-dashboard-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: aks-dashboard-admin
    namespace: kube-system
EOF
# Grant full admin permissions to the dashboard service accounts
$ cat << EOF > k8s-ServiceAccountMgr.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-head
  labels:
    k8s-app: kubernetes-dashboard-head
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard-head
    namespace: kube-system
EOF
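The two manifests above are only written to disk; presumably they still need to be applied (the original does not show this step):
$ kubectl apply -f k8s-ServiceAccount.yaml
$ kubectl apply -f k8s-ServiceAccountMgr.yaml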
After logging in again the warning is gone. Note: you must log in with the new account's token.
$ kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}'
aks-dashboard-admin-token-bhlhp
dashboard-admin-token-xf2rg
$ kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}' | head -1)
Name: aks-dashboard-admin-token-bhlhp
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name=aks-dashboard-admin
kubernetes.io/service-account.uid=ed9e7167-796b-11e9-8dd5-08002741bec7
Type: kubernetes.io/service-account-token
Data
====
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJha3MtZGFzaGJvYXJkLWFkbWluLXRva2VuLWJobGhwIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFrcy1kYXNoYm9hcmQtYWRtaW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlZDllNzE2Ny03OTZiLTExZTktOGRkNS0wODAwMjc0MWJlYzciLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWtzLWRhc2hib2FyZC1hZG1pbiJ9.XIAT5jwIOn5Q9BxA_O1jDgsUvLmfd71Ki3Z4isRidi79CQJmuLH5cRNeaIl3XW4U3lY2kURbK09e0p_KHNic0SpfHP0Hv35AJGvobrUGYEPCFUug5sAlDq8yRNcpMCu5D3vGnYvrCAvBkLb5VTchQXwFyAmgj6fyX4ugvnssHqJ6Lpbn6RMZvEXTEF-RO3CBSOghor38IHA5TLhi7SZZEGfsqOMOhgBrh2YefqX-NtsS2QqjlWDcsK6UOUlaLa2la7huYaIM7seV69PjjUi1Ewj_M72Ho8_1EhlQj8wD1FCle6jkjc59uAZ0GQEVkO4fOz5FpHuvkZHhph8pYmVC2A
ca.crt: 1025 bytes
Heapster collects Kubernetes cluster metrics and provides container monitoring.
refs:
https://github.com/kubernetes-retired/heapster/blob/master/docs/influxdb.md
Download the Heapster source to the master node
$ wget https://codeload.github.com/kubernetes-retired/heapster/zip/master
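The zip downloads as a file literally named master; a likely unpacking step (assumed, not shown in the original) that yields the heapster-master directory used below:
$ unzip master   # install unzip first if it is missing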
# You need to work from the project root directory
$ pwd
/root/heapster-master
# Check which images need to be downloaded in advance
$ cd /root/heapster-master/deploy/kube-config/influxdb
$ grep -Hrni 'image: ' | awk '{print $NF}'
k8s.gcr.io/heapster-amd64:v1.5.4
k8s.gcr.io/heapster-influxdb-amd64:v1.5.2
k8s.gcr.io/heapster-grafana-amd64:v5.0.4
# Pull the required images
hps_imgs=(heapster-amd64:v1.5.4 heapster-influxdb-amd64:v1.5.2 heapster-grafana-amd64:v5.0.4)
for img in ${hps_imgs[@]};
do
image="registry.aliyuncs.com/google_containers/$img"
docker image pull ${image}
docker image tag ${image} k8s.gcr.io/$img
docker image rm ${image}
done
# Create Heapster
$ kubectl create -f deploy/kube-config/influxdb/
deployment.extensions "monitoring-grafana" created
service "monitoring-grafana" created
serviceaccount "heapster" created
deployment.extensions "heapster" created
service "heapster" created
deployment.extensions "monitoring-influxdb" created
service "monitoring-influxdb" created
$ kubectl create -f deploy/kube-config/rbac/heapster-rbac.yaml
clusterrolebinding.rbac.authorization.k8s.io "heapster" created
# Check which nodes the containers were scheduled on
$ kubectl -n kube-system get pods -o wide | egrep 'grafana|influxdb'
monitoring-grafana-68b57d754-ftfms 1/1 Running 0 12m 192.168.50.98 dbk8s-node-02
monitoring-influxdb-cc95575b9-7d9xj 1/1 Running 0 12m 192.168.50.2 dbk8s-node-01
Export the images just downloaded and import them on the corresponding nodes, then refresh the browser; clicking Overview now shows the graphs. (The latest version seems to have Heapster integrated already.)
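A minimal sketch of that export/import, assuming the node names used earlier and an archive name chosen here just for illustration:
# on the master
$ docker image save -o heapster-images.tar.gz \
    k8s.gcr.io/heapster-amd64:v1.5.4 \
    k8s.gcr.io/heapster-influxdb-amd64:v1.5.2 \
    k8s.gcr.io/heapster-grafana-amd64:v5.0.4
$ scp heapster-images.tar.gz dbk8s-node-01:/root
$ scp heapster-images.tar.gz dbk8s-node-02:/root
# on each node
$ docker image load -i /root/heapster-images.tar.gz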