Deploying Kubernetes 1.13.1 and Istio 1.3.1
Deploying Kubernetes
Prerequisites
1. The etcd cluster is up and running, with TLS certificates configured
2. Swap is disabled on every node, and each node's hostname and /etc/hosts entries are set up (see the sketch after this list)
3. Docker is installed on every node; version 17.03 is used here
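For item 2, a minimal sketch of the host preparation; the hostnames and IPs below are illustrative placeholders, not values from this deployment:
# Disable swap now and keep it disabled across reboots
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
# Set this node's hostname (example name)
hostnamectl set-hostname k8s-master01
# Map every cluster node in /etc/hosts (example IPs)
cat >> /etc/hosts <<EOF
10.0.0.11 k8s-master01
10.0.0.12 k8s-master02
10.0.0.13 k8s-master03
EOF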
Extract the kube1.13.1.tgz archive
# Files and scripts required for the deployment
链接:https://pan.baidu.com/s/1232DdKT3VOgQry88ON3tiQ
提取码:rz1c
tar -zxvf kube1.13.1.tgz
cd kube1.13.1
Directory layout
├── conf
│   ├── dashboard
│   ├── flannel
│   ├── heapster
│   ├── kubeadm.yaml
│   └── wget
├── helm
│   └── helm-v2.14.3-linux-amd64.tar.gz
├── images
│   ├── coredns_1.2.6.tar
│   ├── etcd_3.2.24.tar
│   ├── flannel-0.11.tar
│   ├── heapster-amd64_v1.5.4.tar
│   ├── heapster-grafana-amd64_v5.0.4.tar
│   ├── heapster-influxdb-amd64_v1.5.2.tar
│   ├── helm-tiller-v2.14.3.tar
│   ├── kube-apiserver_v1.13.1.tar
│   ├── kube-controller-manager_v1.13.1.tar
│   ├── kube-proxy_v1.13.1.tar
│   ├── kubernetes-dashboard-amd64_v1.10.1.tar
│   ├── kube-scheduler_v1.13.1.tar
│   ├── pause_3.1.tar
│   └── pull.sh
├── istio-1.3.1
│   ├── bin
│   ├── install
│   ├── istio-int.yaml
│   ├── istio.VERSION
│   ├── istio.yaml
│   ├── LICENSE
│   ├── README.md
│   ├── samples
│   └── tools
├── istio-tar-1.3.1
│   ├── citadel.tar
│   ├── galley.tar
│   ├── kubectl.tar
│   ├── mixer-1.3.1.tar
│   ├── pilot-1.3.1.tar
│   ├── prometheus-v2.8.0.tar
│   ├── proxy_init-1.3.1.tar
│   ├── proxyv2.tar
│   └── sidecar_injector.tar
├── kubeadm
│   ├── 25cd948f63fea40e81e43fbe2e5b635227cc5bbda6d5e15d42ab52decf09a5ac-kubelet-1.13.1-0.x86_64.rpm
│   ├── 5af5ecd0bc46fca6c51cc23280f0c0b1522719c282e23a2b1c39b8e720195763-kubeadm-1.13.1-0.x86_64.rpm
│   ├── 7855313ff2b42ebcf499bc195f51d56b8372abee1a19bbf15bb4165941c0229d-kubectl-1.13.1-0.x86_64.rpm
│   ├── e253c692a017b164ebb9ad1b6537ff8afd93c35e9ebc340a52c5bd42425c0760-cri-tools-1.11.0-0.x86_64.rpm
│   ├── fe33057ffe95bfae65e2f269e1b05e99308853176e24a4d027bc082b471a07c0-kubernetes-cni-0.6.0-0.x86_64.rpm
│   └── socat-1.7.3.2-2.el7.x86_64.rpm
└── shell
    ├── init.sh
    └── master.sh
Modify the configuration files
Edit the kubeadm.yaml file
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - "$MASTER01_IP"
  - "$MASTER02_IP"
  - "$MASTER03_IP"
  - "$VIP/$SLB_IP"
controlPlaneEndpoint: "$VIP/$SLB_IP:6443"
kubernetesVersion: v1.13.1
etcd:
  external:
    endpoints:
    - https://$ETCD01_IP:2379
    - https://$ETCD02_IP:2379
    - https://$ETCD03_IP:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
imageRepository: k8s.gcr.io
networking:
  dnsDomain: cluster.local
  podSubnet: "19.19.0.0/16"
  serviceSubnet: 192.168.0.0/16
certSANs: the IPs and hostnames of all master nodes plus the SLB/VIP
endpoints: the etcd cluster addresses
podSubnet must use the same CIDR as conf/flannel/kube-flannel-0.11.yaml (see the check below)
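To confirm the two values match, you can grep the flannel manifest; "Network" is the key where the standard flannel manifest stores its pod CIDR, and the sed line is only an illustration:
# Show flannel's configured pod network
grep -n '"Network"' conf/flannel/kube-flannel-0.11.yaml
# If it differs from podSubnet, align it, e.g.:
# sed -i 's#"Network": "[^"]*"#"Network": "19.19.0.0/16"#' conf/flannel/kube-flannel-0.11.yaml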
On all nodes
cd shell
sh init.sh
# run a reset first
kubeadm reset
On the master01 node
cd shell
sh master.sh
# Record the kubeadm join xxx command; it is needed when joining nodes later
# Verify it worked
kubectl get node
kubectl get pod --all-namespaces -owide
Copy the certificates to the other master nodes
# If a master node does not yet have /etc/kubernetes, create the directory first
scp -r /etc/kubernetes/pki root@$MASTER_IP:/etc/kubernetes
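With several masters, a small loop saves repetition; a sketch assuming passwordless SSH and placeholder IP variables:
for ip in $MASTER02_IP $MASTER03_IP; do
  ssh root@$ip "mkdir -p /etc/kubernetes"          # create the directory if missing
  scp -r /etc/kubernetes/pki root@$ip:/etc/kubernetes
done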
On the other master nodes
sh master.sh
Verify it worked
kubectl get node
kubectl get pod --all-namespaces -owide
Enable IPVS in kube-proxy
Enabling IPVS requires the corresponding kernel modules
# Check whether the kernel modules are loaded
lsmod|grep ip_vs
# Load them if they are not
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
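These modprobe calls only last until reboot; to load the modules automatically at boot, one common option is a modules-load.d file:
# Load the IPVS modules on every boot
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF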
Update the kube-proxy configuration
kubectl edit configmap kube-proxy -n kube-system
Locate the following section
  minSyncPeriod: 0s
  scheduler: ""
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs" # add this
nodePortAddresses: null
mode is empty by default, which means iptables mode; change it to ipvs. scheduler is also empty by default, so the IPVS load-balancing algorithm falls back to round-robin (rr). Save and exit when done.
Delete all kube-proxy pods so they restart with the new configuration
kubectl delete pod $POD_NAME -n kube-system
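In kubeadm clusters the kube-proxy pods carry the k8s-app=kube-proxy label, so they can also be deleted in one go instead of one by one:
kubectl delete pod -n kube-system -l k8s-app=kube-proxy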
Check the kube-proxy pod logs
kubectl logs $POD_NAME -n kube-system
The line "Using ipvs Proxier" in the log confirms IPVS is in use
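If the ipvsadm tool is installed, the IPVS rules themselves can be inspected as an extra check:
# yum install -y ipvsadm   # if not already installed
ipvsadm -Ln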
Add worker nodes
Run on each node
kubeadm join xxx
If the kubeadm join xxx command has been lost, it can be regenerated by running the following on any master node
kubeadm token create --print-join-command
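The output is a complete join command, roughly of this form (the token and hash below are placeholders):
kubeadm join <VIP or SLB IP>:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash>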
Dashboard and Heapster
# Reference links
https://blog.csdn.net/networken/article/details/85607593
https://www.jianshu.com/p/7ad86c485f49
Get the dashboard login token with the following command
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
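The command above assumes an admin-user ServiceAccount with cluster-admin rights already exists (as created in the reference links); a minimal sketch of creating it:
# Create the admin-user account the dashboard token is read from
kubectl -n kube-system create serviceaccount admin-user
kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin --serviceaccount=kube-system:admin-user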
At this point, the Kubernetes 1.13.1 deployment is complete
Resetting after errors
Master nodes
systemctl stop etcd kubelet
kubeadm reset
rm -rf /etc/kubernetes/*
rm -rf /root/.kube
rm -rf /var/lib/kubelet/*
rm -rf /var/lib/etcd/*
rm -rf /run/flannel
rm -rf /var/lib/cni
ip link delete cni0
ip link delete flannel.1
Worker nodes
systemctl stop kubelet
kubeadm reset
rm -rf /etc/kubernetes/*
rm -rf /root/.kube
rm -rf /var/lib/kubelet/*
rm -rf /run/flannel
rm -rf /var/lib/cni
ip link delete cni0
ip link delete flannel.1
Deploy Helm
Install the client
cd helm
tar -zxvf helm-v2.14.3-linux-amd64.tar.gz
cp linux-amd64/helm linux-amd64/tiller /usr/bin
Run helm version to check the version; output like the following appears
Client: &version.Version{SemVer:"v2.14.3", GitCommit:"0e7f3...
Error: Get https://....
Install the Tiller server
# Create the service account
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
# Initialize Tiller
helm init --service-account tiller --tiller-image gcr.io/kubernetes-helm/tiller:v2.14.3 --skip-refresh
Run helm version again; Tiller should now respond normally
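To double-check from the cluster side, the Tiller deployment runs in kube-system and its pods are labeled app=helm,name=tiller:
kubectl -n kube-system get pods -l app=helm,name=tiller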
# Reference link
https://blog.51cto.com/10880347/2434647
Deploy Istio
Load the images
cd istio-tar-1.3.1
for i in *.tar;do docker load -i $i;done
Create the namespace and deploy
# The Helm charts live under istio-1.3.1, so switch there first
cd ../istio-1.3.1
kubectl create namespace istio-system
helm template install/kubernetes/helm/istio-init --name istio-init --namespace istio-system | kubectl apply -f -
# Check that there are 23 CRDs
kubectl get crds | grep 'istio.io' | wc -l
helm template install/kubernetes/helm/istio --name istio --namespace istio-system | kubectl apply -f -
Verify the installation
kubectl get svc -n istio-system
# Pod status should be Running or Completed
kubectl get pods -n istio-system
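Once everything is Running, a typical next step (not part of the bundled scripts) is to enable automatic sidecar injection on a namespace and deploy the Bookinfo sample shipped with istio-1.3.1 to verify it; the default namespace here is only an example:
# Label a namespace so new pods get the Envoy sidecar injected automatically
kubectl label namespace default istio-injection=enabled
# Deploy the bundled Bookinfo sample (run from the istio-1.3.1 directory)
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml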