Vagrantfile:
Vagrant.configure("2") do |config|
  (1..3).each do |i|
    config.vm.define "k8s-node#{i}" do |node|
      # box used for the VM
      node.vm.box = "centos/7"
      # VM hostname
      node.vm.hostname = "k8s-node#{i}"
      # VM IP address
      node.vm.network "private_network", ip: "192.168.56.#{99+i}", netmask: "255.255.255.0"
      # shared folder between host and VM
      # node.vm.synced_folder "~/Documents/vagrant/share", "/home/vagrant/share"
      # VirtualBox-specific settings
      node.vm.provider "virtualbox" do |v|
        # VM name
        v.name = "k8s-node#{i}"
        # memory size (MB)
        v.memory = 2048
        # number of CPUs
        v.cpus = 4
      end
    end
  end
end
Run the following in the directory containing the Vagrantfile:
# start the VMs
vagrant up
# connect to one of the VMs (repeat the steps below on all three nodes)
vagrant ssh k8s-node1
# switch to the root user
su root
# password
vagrant
# allow remote password login over SSH
vi /etc/ssh/sshd_config
# change PasswordAuthentication no to PasswordAuthentication yes and save
# restart sshd
service sshd restart
# exit and repeat on the other two nodes
exit
# check the default route and NIC
[root@k8s-node1 ~]# ip route show
default via 10.0.2.2 dev eth0 proto dhcp metric 100 # eth0 is the default interface
10.0.2.0/24 dev eth0 proto kernel scope link src 10.0.2.15 metric 100 # gateway and IP
192.168.56.0/24 dev eth1 proto kernel scope link src 192.168.56.100 metric 101
The routing table shows that packets are sent and received through eth0 by default.
Check the address bound to eth0 on k8s-node1, k8s-node2 and k8s-node3: they are all identical, 10.0.2.15. The eth0 addresses are what the Kubernetes cluster would use for its traffic, while the eth1 addresses are only used for remote management.
[root@k8s-node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 08:00:27:8d:e4:a1 brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global noprefixroute dynamic eth0
valid_lft 488sec preferred_lft 488sec
inet6 fe80::a00:27ff:fe8d:e4a1/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 08:00:27:bb:c0:06 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.100/24 brd 192.168.56.255 scope global noprefixroute eth1
* Cause: the default adapter uses VirtualBox port forwarding, so all three VMs share the same address and are only distinguished by port. Those port-forwarding rules cause many unnecessary problems later, so the adapter needs to be switched to the NAT Network type.
* Fix: in VirtualBox, open File -> Preferences -> Network, add a new NAT Network, and save.
Change each VM's first adapter to that NAT Network and refresh its MAC address.
Make sure the three VMs can ping each other and can also ping the outside network.
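If you prefer the command line to the VirtualBox GUI, roughly equivalent VBoxManage calls look like this (run on the host with the VMs powered off; the network name k8s-natnet and the CIDR are example values, not from the original steps):
# create a NAT Network once
VBoxManage natnetwork add --netname k8s-natnet --network "10.0.2.0/24" --enable --dhcp on
# attach the first adapter of each VM to it and regenerate the MAC (repeat for k8s-node2 and k8s-node3)
VBoxManage modifyvm k8s-node1 --nic1 natnetwork --nat-network1 k8s-natnet --macaddress1 auto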
Set up the Linux environment (on all three nodes)
Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
Disable SELinux (the default Linux security policy)
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
Disable swap
# temporarily
swapoff -a
# permanently
sed -ri 's/.*swap.*/#&/' /etc/fstab
Add hostname-to-IP mappings
# check the hostname
hostname
If the hostname is not correct, change it with "hostnamectl set-hostname <new-hostname>".
vi /etc/hosts
10.0.2.4 k8s-node1
10.0.2.15 k8s-node2
10.0.2.5 k8s-node3
Pass bridged IPv4 traffic to iptables chains:
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
Apply the rules:
sysctl --system
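A quick sanity check that the settings above took effect (expected results shown in the comments; if the sysctl line errors, the br_netfilter module is not loaded yet, so run modprobe br_netfilter first):
getenforce                                   # Permissive now, Disabled after a reboot
free -h | grep -i swap                       # the Swap line should show 0B
sysctl net.bridge.bridge-nf-call-iptables    # should print ... = 1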
Install Docker
Remove any previous Docker installation
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
Install Docker CE
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
# point yum at the Docker CE repo
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# install docker-ce, docker-ce-cli and containerd.io
sudo yum -y install docker-ce docker-ce-cli containerd.io
Configure a Docker registry mirror
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://8eorvk5t.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
Enable Docker at boot
systemctl enable docker
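A quick check that Docker is healthy and that the mirror from daemon.json is active (the mirror URL printed is whatever you configured above):
sudo docker info | grep -A 1 -i "registry mirrors"
sudo docker run --rm hello-world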
Add the Aliyun Kubernetes yum repo
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install kubeadm, kubelet and kubectl
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3
Enable at boot & start
systemctl enable kubelet
systemctl start kubelet
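A quick version check; note that kubelet itself will keep restarting until kubeadm init/join hands it a configuration, which is expected at this stage:
kubeadm version -o short    # should print v1.17.3
kubelet --version           # should print Kubernetes v1.17.3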
(1) Initialize the master node
Pre-pull the required images with the script below (run on the master):
#!/bin/bash
images=(
kube-apiserver:v1.17.3
kube-proxy:v1.17.3
kube-controller-manager:v1.17.3
kube-scheduler:v1.17.3
coredns:1.6.5
etcd:3.4.3-0
pause:3.1
)
for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
done
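One way to run the script above, assuming you saved it as master_images.sh (the file name is arbitrary):
chmod +x master_images.sh && ./master_images.sh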
Initialize kubeadm (note: replace 10.0.2.4 with the master's default eth0 address from ip addr; --pod-network-cidr is the pod-to-pod network and must match flannel's default 10.244.0.0/16)
kubeadm init \
--apiserver-advertise-address=10.0.2.4 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.17.3 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16
# output after a successful run
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.2.4:6443 --token 94epyx.ew2xu6ofhdmrbzgr \
--discovery-token-ca-cert-hash sha256:fc07908cb01204efc79b51d836956a447c41b20436509bcf1dc21b28b0a37ce9
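The token in the join command above is only valid for 24 hours by default; if you join a node later, regenerate the full join command on the master with:
kubeadm token create --print-join-command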
Run on the master:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Install the pod network add-on on the master node
kubectl apply -f \
https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
The address above may be blocked; if so, just apply a locally downloaded copy of kube-flannel.yml:
kubectl apply -f kube-flannel.yml
Wait two or three minutes.
# list the pods in a given namespace
kubectl get pods -n kube-system
# list the pods in all namespaces (the kube-flannel-ds-amd64-* pods should be Running)
kubectl get pods --all-namespaces
Check the node information on the master
# all nodes should reach STATUS Ready; this can take a few minutes
kubectl get nodes
Run on each worker node:
kubeadm join 10.0.2.4:6443 --token 94epyx.ew2xu6ofhdmrbzgr \
--discovery-token-ca-cert-hash sha256:fc07908cb01204efc79b51d836956a447c41b20436509bcf1dc21b28b0a37ce9
Check the nodes from the master:
[root@k8s-node1 opt]# kubectl get nodes;
NAME STATUS ROLES AGE VERSION
k8s-node1 Ready master 47m v1.17.3
k8s-node2 NotReady <none> 75s v1.17.3
k8s-node3 NotReady <none> 76s v1.17.3
Watch the pod progress:
watch kubectl get pod -n kube-system -o wide
(1) Create a tomcat deployment on the master node
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8
List all resources:
[root@k8s-node1 ~]# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/tomcat6-5f7ccf4cb9-42rp4 0/1 ContainerCreating 0 21s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 443/TCP 2d2h
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/tomcat6 0/1 1 0 21s
NAME DESIRED CURRENT READY AGE
replicaset.apps/tomcat6-5f7ccf4cb9 1 1 0 21s
[root@k8s-node1 ~]#
Get wider output (you can see that tomcat was scheduled onto k8s-node3):
[root@k8s-node1 ~]# kubectl get all -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/tomcat6-5f7ccf4cb9-42rp4 1/1 Running 0 2m36s 10.244.2.2 k8s-node3
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kubernetes ClusterIP 10.96.0.1 443/TCP 2d2h
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/tomcat6 1/1 1 1 2m36s tomcat tomcat:6.0.53-jre8 app=tomcat6
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/tomcat6-5f7ccf4cb9 1 1 1 2m36s tomcat tomcat:6.0.53-jre8 app=tomcat6,pod-template-hash=5f7ccf4cb9
On k8s-node3, check the images and the running containers:
[root@k8s-node3 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy v1.17.3 ae853e93800d 2 years ago 116MB
quay.io/coreos/flannel v0.11.0-amd64 ff281650a721 3 years ago 52.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 4 years ago 742kB
tomcat 6.0.53-jre8 49ab0583115a 4 years ago 290MB
[root@k8s-node3 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82af330e9ce9 tomcat "catalina.sh run" 5 minutes ago Up 5 minutes k8s_tomcat_tomcat6-5f7ccf4cb9-42rp4_default_017f3e40-97ba-426b-807c-81b1246b1457_0
a8ffef6fc19d registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_tomcat6-5f7ccf4cb9-42rp4_default_017f3e40-97ba-426b-807c-81b1246b1457_0
1e983064186b ff281650a721 "/opt/bin/flanneld -…" 16 minutes ago Up 16 minutes k8s_kube-flannel_kube-flannel-ds-amd64-7g2jf_kube-system_91b4dcf3-4132-4b38-8fda-728ea4c5bcc1_1
5b4dfb391af9 ae853e93800d "/usr/local/bin/kube…" 16 minutes ago Up 16 minutes k8s_kube-proxy_kube-proxy-qqwnh_kube-system_cdbe57ad-29d4-44a5-bfcc-27bde6967f26_1
7e8f1c697f09 registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 16 minutes ago Up 16 minutes k8s_POD_kube-flannel-ds-amd64-7g2jf_kube-system_91b4dcf3-4132-4b38-8fda-728ea4c5bcc1_1
4d1c952f7231 registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 16 minutes ago Up 16 minutes k8s_POD_kube-proxy-qqwnh_kube-system_cdbe57ad-29d4-44a5-bfcc-27bde6967f26_1
Run on k8s-node1 (the master):
[root@k8s-node1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
tomcat6-5f7ccf4cb9-42rp4 1/1 Running 0 7m23s
[root@k8s-node1 ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default tomcat6-5f7ccf4cb9-42rp4 1/1 Running 0 9m25s
kube-system coredns-7f9c544f75-5kjwt 1/1 Running 1 2d3h
kube-system coredns-7f9c544f75-h64jq 1/1 Running 1 2d3h
kube-system etcd-k8s-node1 1/1 Running 1 2d3h
kube-system kube-apiserver-k8s-node1 1/1 Running 1 2d3h
kube-system kube-controller-manager-k8s-node1 1/1 Running 1 2d3h
kube-system kube-flannel-ds-amd64-7g2jf 1/1 Running 1 2d2h
kube-system kube-flannel-ds-amd64-9cx8m 1/1 Running 1 2d2h
kube-system kube-flannel-ds-amd64-rzzh7 1/1 Running 1 2d2h
kube-system kube-proxy-l2zlv 1/1 Running 1 2d2h
kube-system kube-proxy-n5lkp 1/1 Running 1 2d3h
kube-system kube-proxy-qqwnh 1/1 Running 1 2d2h
kube-system kube-scheduler-k8s-node1 1/1 Running 1 2d3h
On k8s-node3, simulate killing the tomcat container (kubelet pulls the service back up automatically):
[root@k8s-node3 ~]# docker stop 8660708b06f5
8660708b06f5
[root@k8s-node3 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
236063153c29 49ab0583115a "catalina.sh run" 2 seconds ago Up 1 second k8s_tomcat_tomcat6-5f7ccf4cb9-42rp4_default_017f3e40-97ba-426b-807c-81b1246b1457_2
a8ffef6fc19d registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 14 minutes ago Up 14 minutes k8s_POD_tomcat6-5f7ccf4cb9-42rp4_default_017f3e40-97ba-426b-807c-81b1246b1457_0
1e983064186b ff281650a721 "/opt/bin/flanneld -…" 24 minutes ago Up 24 minutes k8s_kube-flannel_kube-flannel-ds-amd64-7g2jf_kube-system_91b4dcf3-4132-4b38-8fda-728ea4c5bcc1_1
5b4dfb391af9 ae853e93800d "/usr/local/bin/kube…" 24 minutes ago Up 24 minutes k8s_kube-proxy_kube-proxy-qqwnh_kube-system_cdbe57ad-29d4-44a5-bfcc-27bde6967f26_1
7e8f1c697f09 registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 24 minutes ago Up 24 minutes k8s_POD_kube-flannel-ds-amd64-7g2jf_kube-system_91b4dcf3-4132-4b38-8fda-728ea4c5bcc1_1
4d1c952f7231 registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 "/pause" 24 minutes ago Up 24 minutes k8s_POD_kube-proxy-qqwnh_kube-system_cdbe57ad-29d4-44a5-bfcc-27bde6967f26_1
Simulate a server outage by shutting down the k8s-node3 VM:
[root@k8s-node1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-node1 Ready master 2d3h v1.17.3
k8s-node2 Ready <none> 2d2h v1.17.3
k8s-node3 NotReady <none> 2d2h v1.17.3
Failover takes a few minutes (k8s-node2 pulls the tomcat image, creates a replacement pod and starts it):
[root@k8s-node1 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tomcat6-5f7ccf4cb9-42rp4 1/1 Terminating 2 22m 10.244.2.2 k8s-node3
tomcat6-5f7ccf4cb9-7fmb8 0/1 ContainerCreating 0 1s k8s-node2
[root@k8s-node1 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tomcat6-5f7ccf4cb9-42rp4 1/1 Terminating 2 24m 10.244.2.2 k8s-node3
tomcat6-5f7ccf4cb9-7fmb8 1/1 Running 0 2m 10.244.1.2 k8s-node2
(2) Expose tomcat6 for access
Run on the master (the Service listens on port 80 and proxies to container port 8080; the NodePort that users access is generated randomly):
[root@k8s-node1 ~]# kubectl expose deployment tomcat6 --port=80 --target-port=8080 --type=NodePort
service/tomcat6 exposed
Check the service (the exposed NodePort here is 31380):
[root@k8s-node1 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 443/TCP 2d3h
tomcat6 NodePort 10.96.135.188 80:31380/TCP 76s
[root@k8s-node1 ~]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE     SELECTOR
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        2d3h    <none>
tomcat6      NodePort    10.96.135.188   <none>        80:31380/TCP   7m20s   app=tomcat6
Open http://192.168.56.100:31380/ in a browser to reach the tomcat home page.
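Because the Service is of type NodePort, the same port is open on every node, so you can also test from the command line against any node IP (the addresses come from the Vagrantfile above):
curl -I http://192.168.56.100:31380/
curl -I http://192.168.56.101:31380/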
The command below shows the pod and the Service wrapping it; the pod was created by the Deployment, which also manages a ReplicaSet:
[root@k8s-node1 ~]# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/tomcat6-5f7ccf4cb9-7fmb8 1/1 Running 0 27m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 443/TCP 2d3h
service/tomcat6 NodePort 10.96.135.188 80:31380/TCP 11m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/tomcat6 1/1 1 1 49m
NAME DESIRED CURRENT READY AGE
replicaset.apps/tomcat6-5f7ccf4cb9 1 1 1 49m
(3) Dynamic scaling test
kubectl get deployment
[root@k8s-node1 ~]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
tomcat6 1/1 1 1 53m
Scale up to 3 replicas:
[root@k8s-node1 ~]# kubectl scale --replicas=3 deployment tomcat6
deployment.apps/tomcat6 scaled
[root@k8s-node1 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tomcat6-5f7ccf4cb9-7fmb8 1/1 Running 0 34m 10.244.1.2 k8s-node2
tomcat6-5f7ccf4cb9-m98hp 1/1 Running 0 75s 10.244.1.3 k8s-node2
tomcat6-5f7ccf4cb9-r89jv 1/1 Running 0 75s 10.244.2.3 k8s-node3
Scaling down works the same way: kubectl scale --replicas=1 deployment tomcat6
Delete the deployment and the service
[root@k8s-node1 ~]# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/tomcat6-5f7ccf4cb9-r89jv 1/1 Running 0 7m9s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 443/TCP 2d3h
service/tomcat6 NodePort 10.96.135.188 80:31380/TCP 24m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/tomcat6 1/1 1 1 62m
NAME DESIRED CURRENT READY AGE
replicaset.apps/tomcat6-5f7ccf4cb9 1 1 1 62m
[root@k8s-node1 ~]# kubectl delete deployment.apps/tomcat6
deployment.apps "tomcat6" deleted
[root@k8s-node1 ~]# kubectl get all
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 443/TCP 2d3h
service/tomcat6 NodePort 10.96.135.188 80:31380/TCP 26m
[root@k8s-node1 ~]# kubectl get pods
No resources found in default namespace.
[root@k8s-node1 ~]# kubectl delete service/tomcat6
service "tomcat6" deleted
(1) YAML output
#help
[root@k8s-node1 ~]# kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --help
Create a deployment with the specified name.
Aliases:
deployment, deploy
Examples:
# Create a new deployment named my-dep that runs the busybox image.
kubectl create deployment my-dep --image=busybox
Options:
--allow-missing-template-keys=true: If true, ignore any errors in templates when a field or map key is missing in
the template. Only applies to golang and jsonpath output formats.
--dry-run=false: If true, only print the object that would be sent, without sending it.
--generator='': The name of the API generator to use.
--image=[]: Image name to run.
-o, --output='': Output format. One of:
json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-file.
--save-config=false: If true, the configuration of current object will be saved in its annotation. Otherwise, the
annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.
--template='': Template string or path to template file to use when -o=go-template, -o=go-template-file. The
template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
--validate=true: If true, use a schema to validate the input before sending it
Usage:
kubectl create deployment NAME --image=image [--dry-run] [options]
Use "kubectl options" for a list of global command-line options (applies to all commands).
[root@k8s-node1 ~]# kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --dry-run -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: tomcat6
  name: tomcat6
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tomcat6
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: tomcat6
    spec:
      containers:
      - image: tomcat:6.0.53-jre8
        name: tomcat
        resources: {}
status: {}
Write the YAML to a file:
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --dry-run -o yaml > tomcat6.yaml
Edit with vi and save:
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: tomcat6
  name: tomcat6
spec:
  replicas: 3
  selector:
    matchLabels:
      app: tomcat6
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: tomcat6
    spec:
      containers:
      - image: tomcat:6.0.53-jre8
        name: tomcat
Apply the YAML and check:
[root@k8s-node1 ~]# kubectl apply -f tomcat6.yaml
deployment.apps/tomcat6 created
[root@k8s-node1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
tomcat6-5f7ccf4cb9-h2wdb 1/1 Running 0 30s
tomcat6-5f7ccf4cb9-slddq 1/1 Running 0 30s
tomcat6-5f7ccf4cb9-vcp2v 1/1 Running 0 30s
[root@k8s-node1 ~]#
Inspect a single pod and output its YAML:
[root@k8s-node1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
tomcat6-5f7ccf4cb9-h2wdb 1/1 Running 0 13m
tomcat6-5f7ccf4cb9-slddq 1/1 Running 0 13m
tomcat6-5f7ccf4cb9-vcp2v 1/1 Running 0 13m
[root@k8s-node1 ~]# kubectl get pods tomcat6-5f7ccf4cb9-h2wdb
NAME READY STATUS RESTARTS AGE
tomcat6-5f7ccf4cb9-h2wdb 1/1 Running 0 14m
[root@k8s-node1 ~]# kubectl get pods tomcat6-5f7ccf4cb9-h2wdb -o yaml
Combine a Deployment with a Service
[root@k8s-node1 ~]# kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --dry-run -o yaml > tomcat6-deployment.yaml
[root@k8s-node1 ~]# kubectl expose deployment tomcat6 --port=80 --target-port=8080 --type=NodePort --dry-run -o yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: tomcat6
  name: tomcat6
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: tomcat6
  type: NodePort
status:
  loadBalancer: {}
# copy the Service YAML above and join it to the Deployment YAML with ---, so one file both deploys and exposes the service
[root@k8s-node1 ~]#vi tomcat6-deployment.yaml
apiVersion: apps/v1
kind: Deployment          # the Deployment
metadata:
  labels:
    app: tomcat6          # label
  name: tomcat6
spec:
  replicas: 3             # number of replicas
  selector:
    matchLabels:
      app: tomcat6
  template:
    metadata:
      labels:
        app: tomcat6
    spec:
      containers:
      - image: tomcat:6.0.53-jre8
        name: tomcat
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: tomcat6
  name: tomcat6
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: tomcat6
  type: NodePort
Deploy and expose the service:
[root@k8s-node1 ~]# kubectl apply -f tomcat6-deployment.yaml
deployment.apps/tomcat6 created
service/tomcat6 created
Ingress discovers pods through Services and associates with them, enabling access by domain name.
The Ingress controller provides load balancing across pods.
It supports layer-4 (TCP/UDP) and layer-7 (HTTP) load balancing.
One Ingress manages multiple Services, and each Service manages multiple pods (see the sketch below).
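As a sketch of "one Ingress manages multiple Services": the manifest below routes two hostnames to two different Services using the same extensions/v1beta1 API that is used later in this section; the second Service, nginx-svc, is hypothetical and does not exist in this cluster:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: multi-service-demo
spec:
  rules:
  - host: tomcat6.kubenetes.com
    http:
      paths:
      - backend:
          serviceName: tomcat6
          servicePort: 80
  - host: nginx.kubenetes.com
    http:
      paths:
      - backend:
          serviceName: nginx-svc   # hypothetical second Service
          servicePort: 80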
(1) Deploy the Ingress controller
Apply "k8s/ingress-controller.yaml":
[root@k8s-node1 k8s]# kubectl apply -f ingress-controller.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
daemonset.apps/nginx-ingress-controller created
service/ingress-nginx created
Check:
[root@k8s-node1 k8s]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default tomcat6-5f7ccf4cb9-7tsfd 1/1 Running 0 28m
default tomcat6-5f7ccf4cb9-kggt4 1/1 Running 0 28m
default tomcat6-5f7ccf4cb9-zdv97 1/1 Running 0 28m
ingress-nginx nginx-ingress-controller-2nn5c 0/1 ContainerCreating 0 96s
ingress-nginx nginx-ingress-controller-lm95q 0/1 Running 0 96s
kube-system coredns-7f9c544f75-5kjwt 1/1 Running 3 6d23h
kube-system coredns-7f9c544f75-h64jq 1/1 Running 3 6d23h
kube-system etcd-k8s-node1 1/1 Running 3 6d23h
kube-system kube-apiserver-k8s-node1 1/1 Running 3 6d23h
kube-system kube-controller-manager-k8s-node1 1/1 Running 3 6d23h
kube-system kube-flannel-ds-amd64-7g2jf 1/1 Running 4 6d23h
kube-system kube-flannel-ds-amd64-9cx8m 1/1 Running 3 6d23h
kube-system kube-flannel-ds-amd64-rzzh7 1/1 Running 3 6d23h
kube-system kube-proxy-l2zlv 1/1 Running 3 6d23h
kube-system kube-proxy-n5lkp 1/1 Running 3 6d23h
kube-system kube-proxy-qqwnh 1/1 Running 4 6d23h
kube-system kube-scheduler-k8s-node1 1/1 Running 3 6d23h
The master node only schedules the work; k8s-node2 and k8s-node3 actually run the controller pods, and in the listing above you can see them still pulling images (STATUS ContainerCreating).
(2) Create an Ingress rule
Create ingress-tomcat6.yaml (edit with vi):
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web
spec:
  rules:
  - host: tomcat6.kubenetes.com
    http:
      paths:
      - backend:
          serviceName: tomcat6
          servicePort: 80
Apply it:
[root@k8s-node1 k8s]# kubectl apply -f ingress-tomcat6.yaml
ingress.extensions/web created
Edit the hosts file on your local machine and add a domain-mapping entry:
#kubernetes
192.168.56.102 tomcat6.kubenetes.com
Test: opening http://tomcat6.kubenetes.com/ reaches tomcat.
Even if one node in the cluster becomes unavailable, the overall service keeps running.
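If you would rather not edit the hosts file, you can test the same routing from any machine by sending the Host header explicitly (this assumes the ingress controller listens on port 80 on the node, which is what the hosts-file test above already relies on):
curl -I -H "Host: tomcat6.kubenetes.com" http://192.168.56.102/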
1) Install Helm (run on the master)
Helm is the package manager for Kubernetes. Like apt on Ubuntu, yum on CentOS or pip for Python, a package manager lets you quickly find, download and install packages. Helm consists of the helm client and the server-side component Tiller; it bundles a set of Kubernetes resources into a package that can be managed as one unit, and it is the standard way to find, share and use software built for Kubernetes.
Install:
curl -L http://git.io/get_helm.sh | bash
Because of network blocking, upload the get_helm.sh we provide instead, chmod 700 it, then run ./get_helm.sh
The file may have a format-compatibility problem (DOS line endings); open it with vi and type:
:set ff
Press Enter; if it shows fileformat=dos, reset the file format:
:set ff=unix
Save and quit:
:wq
Note: if the install fails, download helm-v2.17.0-linux-amd64.tar.gz from https://github.com/helm/helm/releases/tag/v2.17.0
Extract and copy the binaries:
tar xf helm-v2.17.0-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin
cp linux-amd64/tiller /usr/local/bin
Verify the version:
helm version
Create the RBAC permissions: create helm-rbac.yaml with the following content:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
Apply the configuration:
kubectl apply -f helm-rbac.yaml
Install Tiller (run on the master)
Initialize:
helm init --service-account=tiller --tiller-image=sapcc/tiller:v2.16.3 --history-max 300
--tiller-image points to an alternative Tiller image, because the default image repository is blocked.
Wait for the tiller pod deployed in the cluster to become ready.
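To watch Tiller come up (with Helm v2 the server side is normally a Deployment named tiller-deploy in kube-system):
kubectl get pods -n kube-system | grep tiller
kubectl -n kube-system rollout status deployment tiller-deploy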
Check that the helm and tiller binaries are available:
helm
tiller
Test:
helm install stable/nginx-ingress --name nginx-ingress
Install OpenEBS
OpenEBS provides a StorageClass for Kubernetes. The cluster currently has no StorageClass, so we install OpenEBS to provide one and must manually mark it as the default.
Check all node information:
[root@k8s-node1 k8s]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-node1 Ready master 10d v1.17.3 10.0.2.4 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.13
k8s-node2 Ready <none> 10d v1.17.3 10.0.2.15 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.13
k8s-node3 Ready <none> 10d v1.17.3 10.0.2.5 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.13
Confirm whether the master node has a Taint; as shown below, it does:
[root@k8s-node1 k8s]# kubectl describe node k8s-node1 | grep Taint
Taints: node-role.kubernetes.io/master:NoSchedule
Remove the Taint from the master node:
[root@k8s-node1 k8s]# kubectl taint nodes k8s-node1 node-role.kubernetes.io/master:NoSchedule-
node/k8s-node1 untainted
Check again; the Taint is gone:
[root@k8s-node1 k8s]# kubectl describe node k8s-node1 | grep Taint
Taints:             <none>
Create the OpenEBS namespace; all OpenEBS resources will be created in it:
kubectl create ns openebs
Create openebs-operator-1.7.0.yaml with vi:
# This manifest deploys the OpenEBS control plane components, with associated CRs & RBAC rules
# NOTE: On GKE, deploy the openebs-operator.yaml in admin context
# Create the OpenEBS namespace
apiVersion: v1
kind: Namespace
metadata:
name: openebs
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: openebs-maya-operator
namespace: openebs
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: openebs-maya-operator
rules:
- apiGroups: ["*"]
resources: ["nodes", "nodes/proxy"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["statefulsets", "daemonsets"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["resourcequotas", "limitranges"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "certificatesigningrequests"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["*"]
resources: [ "disks", "blockdevices", "blockdeviceclaims"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorpoolclusters", "storagepoolclaims", "storagepoolclaims/finalizers", "cstorpoolclusters/finalizers", "storagepools"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "castemplates", "runtasks"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes", "cstorvolumeclaims", "cstorvolumepolicies"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorpoolinstances", "cstorpoolinstances/finalizers"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorbackups", "cstorrestores", "cstorcompletedbackups"]
verbs: ["*" ]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "create", "list", "delete", "update", "patch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
- apiGroups: ["*"]
resources: ["upgradetasks"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "create", "delete", "watch"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: openebs-maya-operator
subjects:
- kind: ServiceAccount
name: openebs-maya-operator
namespace: openebs
roleRef:
kind: ClusterRole
name: openebs-maya-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: maya-apiserver
namespace: openebs
labels:
name: maya-apiserver
openebs.io/component-name: maya-apiserver
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: maya-apiserver
openebs.io/component-name: maya-apiserver
replicas: 1
strategy:
type: Recreate
rollingUpdate: null
template:
metadata:
labels:
name: maya-apiserver
openebs.io/component-name: maya-apiserver
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: maya-apiserver
imagePullPolicy: IfNotPresent
image: quay.io/openebs/m-apiserver:1.7.0
ports:
- containerPort: 5656
env:
# OPENEBS_IO_KUBE_CONFIG enables maya api service to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for maya api server version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_IO_K8S_MASTER enables maya api service to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for maya api server version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://172.28.128.3:8080"
# OPENEBS_NAMESPACE provides the namespace of this deployment as an
# environment variable
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# OPENEBS_MAYA_POD_NAME provides the name of this pod as
# environment variable
- name: OPENEBS_MAYA_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# If OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG is false then OpenEBS default
# storageclass and storagepool will not be created.
- name: OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG
value: "true"
# OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL decides whether default cstor sparse pool should be
# configured as a part of openebs installation.
# If "true" a default cstor sparse pool will be configured, if "false" it will not be configured.
# This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG
# is set to true
- name: OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL
value: "false"
# OPENEBS_IO_BASE_DIR is used to configure base directory for openebs on host path.
# Where OpenEBS can store required files. Default base path will be /var/openebs
# - name: OPENEBS_IO_BASE_DIR
# value: "/var/openebs"
# OPENEBS_IO_CSTOR_TARGET_DIR can be used to specify the hostpath
# to be used for saving the shared content between the side cars
# of cstor volume pod.
# The default path used is /var/openebs/sparse
#- name: OPENEBS_IO_CSTOR_TARGET_DIR
# value: "/var/openebs/sparse"
# OPENEBS_IO_CSTOR_POOL_SPARSE_DIR can be used to specify the hostpath
# to be used for saving the shared content between the side cars
# of cstor pool pod. This ENV is also used to indicate the location
# of the sparse devices.
# The default path used is /var/openebs/sparse
#- name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR
# value: "/var/openebs/sparse"
# OPENEBS_IO_JIVA_POOL_DIR can be used to specify the hostpath
# to be used for default Jiva StoragePool loaded by OpenEBS
# The default path used is /var/openebs
# This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG
# is set to true
#- name: OPENEBS_IO_JIVA_POOL_DIR
# value: "/var/openebs"
# OPENEBS_IO_LOCALPV_HOSTPATH_DIR can be used to specify the hostpath
# to be used for default openebs-hostpath storageclass loaded by OpenEBS
# The default path used is /var/openebs/local
# This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG
# is set to true
#- name: OPENEBS_IO_LOCALPV_HOSTPATH_DIR
# value: "/var/openebs/local"
- name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE
value: "quay.io/openebs/jiva:1.7.1"
- name: OPENEBS_IO_JIVA_REPLICA_IMAGE
value: "quay.io/openebs/jiva:1.7.1"
- name: OPENEBS_IO_JIVA_REPLICA_COUNT
value: "3"
- name: OPENEBS_IO_CSTOR_TARGET_IMAGE
value: "quay.io/openebs/cstor-istgt:1.7.0"
- name: OPENEBS_IO_CSTOR_POOL_IMAGE
value: "quay.io/openebs/cstor-pool:1.7.0"
- name: OPENEBS_IO_CSTOR_POOL_MGMT_IMAGE
value: "quay.io/openebs/cstor-pool-mgmt:1.7.0"
- name: OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE
value: "quay.io/openebs/cstor-volume-mgmt:1.7.0"
- name: OPENEBS_IO_VOLUME_MONITOR_IMAGE
value: "quay.io/openebs/m-exporter:1.7.0"
- name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE
value: "quay.io/openebs/m-exporter:1.7.0"
- name: OPENEBS_IO_HELPER_IMAGE
value: "quay.io/openebs/linux-utils:1.7.0"
# OPENEBS_IO_ENABLE_ANALYTICS if set to true sends anonymous usage
# events to Google Analytics
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator"
# OPENEBS_IO_ANALYTICS_PING_INTERVAL can be used to specify the duration (in hours)
# for periodic ping events sent to Google Analytics.
# Default is 24h.
# Minimum is 1h. You can convert this to weekly by setting 168h
#- name: OPENEBS_IO_ANALYTICS_PING_INTERVAL
# value: "24h"
livenessProbe:
exec:
command:
- /usr/local/bin/mayactl
- version
initialDelaySeconds: 30
periodSeconds: 60
readinessProbe:
exec:
command:
- /usr/local/bin/mayactl
- version
initialDelaySeconds: 30
periodSeconds: 60
---
apiVersion: v1
kind: Service
metadata:
name: maya-apiserver-service
namespace: openebs
labels:
openebs.io/component-name: maya-apiserver-svc
spec:
ports:
- name: api
port: 5656
protocol: TCP
targetPort: 5656
selector:
name: maya-apiserver
sessionAffinity: None
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-provisioner
namespace: openebs
labels:
name: openebs-provisioner
openebs.io/component-name: openebs-provisioner
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: openebs-provisioner
openebs.io/component-name: openebs-provisioner
replicas: 1
strategy:
type: Recreate
rollingUpdate: null
template:
metadata:
labels:
name: openebs-provisioner
openebs.io/component-name: openebs-provisioner
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner
imagePullPolicy: IfNotPresent
image: quay.io/openebs/openebs-k8s-provisioner:1.7.0
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,
# that provisioner should forward the volume create/delete requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs provisioner version 0.5.3-RC1 onwards
#- name: OPENEBS_MAYA_SERVICE_NAME
# value: "maya-apiserver-apiservice"
livenessProbe:
exec:
command:
- pgrep
- ".*openebs"
initialDelaySeconds: 30
periodSeconds: 60
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-snapshot-operator
namespace: openebs
labels:
name: openebs-snapshot-operator
openebs.io/component-name: openebs-snapshot-operator
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: openebs-snapshot-operator
openebs.io/component-name: openebs-snapshot-operator
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-snapshot-operator
openebs.io/component-name: openebs-snapshot-operator
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: snapshot-controller
image: quay.io/openebs/snapshot-controller:1.7.0
imagePullPolicy: IfNotPresent
env:
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
livenessProbe:
exec:
command:
- pgrep
- ".*controller"
initialDelaySeconds: 30
periodSeconds: 60
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,
# that snapshot controller should forward the snapshot create/delete requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs provisioner version 0.5.3-RC1 onwards
#- name: OPENEBS_MAYA_SERVICE_NAME
# value: "maya-apiserver-apiservice"
- name: snapshot-provisioner
image: quay.io/openebs/snapshot-provisioner:1.7.0
imagePullPolicy: IfNotPresent
env:
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,
# that snapshot provisioner should forward the clone create/delete requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs provisioner version 0.5.3-RC1 onwards
#- name: OPENEBS_MAYA_SERVICE_NAME
# value: "maya-apiserver-apiservice"
livenessProbe:
exec:
command:
- pgrep
- ".*provisioner"
initialDelaySeconds: 30
periodSeconds: 60
---
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
name: openebs-ndm-config
namespace: openebs
labels:
openebs.io/component-name: ndm-config
data:
# udev-probe is default or primary probe which should be enabled to run ndm
# filterconfigs contails configs of filters - in their form fo include
# and exclude comma separated strings
node-disk-manager.config: |
probeconfigs:
- key: udev-probe
name: udev probe
state: true
- key: seachest-probe
name: seachest probe
state: false
- key: smart-probe
name: smart probe
state: true
filterconfigs:
- key: os-disk-exclude-filter
name: os disk exclude filter
state: true
exclude: "/,/etc/hosts,/boot"
- key: vendor-filter
name: vendor filter
state: true
include: ""
exclude: "CLOUDBYT,OpenEBS"
- key: path-filter
name: path filter
state: true
include: ""
exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: openebs-ndm
namespace: openebs
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: openebs-ndm
openebs.io/component-name: ndm
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 1.7.0
spec:
# By default the node-disk-manager will be run on all kubernetes nodes
# If you would like to limit this to only some nodes, say the nodes
# that have storage attached, you could label those node and use
# nodeSelector.
#
# e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
# kubectl label node "openebs.io/nodegroup"="storage-node"
#nodeSelector:
# "openebs.io/nodegroup": "storage-node"
serviceAccountName: openebs-maya-operator
hostNetwork: true
containers:
- name: node-disk-manager
image: quay.io/openebs/node-disk-manager-amd64:v0.4.7
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /host/node-disk-manager.config
subPath: node-disk-manager.config
readOnly: true
- name: udev
mountPath: /run/udev
- name: procmount
mountPath: /host/proc
readOnly: true
- name: basepath
mountPath: /var/openebs/ndm
- name: sparsepath
mountPath: /var/openebs/sparse
env:
# namespace in which NDM is installed will be passed to NDM Daemonset
# as environment variable
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# pass hostname as env variable using downward API to the NDM container
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# specify the directory where the sparse files need to be created.
# if not specified, then sparse files will not be created.
- name: SPARSE_FILE_DIR
value: "/var/openebs/sparse"
# Size(bytes) of the sparse file to be created.
- name: SPARSE_FILE_SIZE
value: "10737418240"
# Specify the number of sparse files to be created
- name: SPARSE_FILE_COUNT
value: "0"
livenessProbe:
exec:
command:
- pgrep
- ".*ndm"
initialDelaySeconds: 30
periodSeconds: 60
volumes:
- name: config
configMap:
name: openebs-ndm-config
- name: udev
hostPath:
path: /run/udev
type: Directory
# mount /proc (to access mount file of process 1 of host) inside container
# to read mount-point of disks and partitions
- name: procmount
hostPath:
path: /proc
type: Directory
- name: basepath
hostPath:
path: /var/openebs/ndm
type: DirectoryOrCreate
- name: sparsepath
hostPath:
path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-ndm-operator
namespace: openebs
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: node-disk-operator
image: quay.io/openebs/node-disk-operator-amd64:v0.4.7
imagePullPolicy: Always
readinessProbe:
exec:
command:
- stat
- /tmp/operator-sdk-ready
initialDelaySeconds: 4
periodSeconds: 10
failureThreshold: 1
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# the service account of the ndm-operator pod
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPERATOR_NAME
value: "node-disk-operator"
- name: CLEANUP_JOB_IMAGE
value: "quay.io/openebs/linux-utils:1.7.0"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-admission-server
namespace: openebs
labels:
app: admission-webhook
openebs.io/component-name: admission-webhook
openebs.io/version: 1.7.0
spec:
replicas: 1
strategy:
type: Recreate
rollingUpdate: null
selector:
matchLabels:
app: admission-webhook
template:
metadata:
labels:
app: admission-webhook
openebs.io/component-name: admission-webhook
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: admission-webhook
image: quay.io/openebs/admission-server:1.7.0
imagePullPolicy: IfNotPresent
args:
- -alsologtostderr
- -v=2
- 2>&1
env:
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ADMISSION_WEBHOOK_NAME
value: "openebs-admission-server"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-localpv-provisioner
namespace: openebs
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 1.7.0
spec:
selector:
matchLabels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 1.7.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: Always
image: quay.io/openebs/provisioner-localpv:1.7.0
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator"
- name: OPENEBS_IO_HELPER_IMAGE
value: "quay.io/openebs/linux-utils:1.7.0"
livenessProbe:
exec:
command:
- pgrep
- ".*localpv"
initialDelaySeconds: 30
periodSeconds: 60
---
Apply it:
kubectl apply -f openebs-operator-1.7.0.yaml
kubectl get sc
# at first this reports: No resources found in default namespace.
kubectl get pods --all-namespaces
# wait until all openebs pods are Running
Set openebs-hostpath as the default StorageClass:
kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
Check:
[root@k8s-node1 k8s]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
openebs-device openebs.io/local Delete WaitForFirstConsumer false 4m19s
openebs-hostpath (default) openebs.io/local Delete WaitForFirstConsumer false 4m19s
openebs-jiva-default openebs.io/provisioner-iscsi Delete Immediate false 4m19s
openebs-snapshot-promoter volumesnapshot.external-storage.k8s.io/snapshot-promoter Delete Immediate false 4m19s
At this point the OpenEBS LocalPV storage class has been created and set as the default. Because we removed the master node's Taint at the start, we can add it back now that OpenEBS is installed, so business workloads are not scheduled onto the master and do not compete for its resources (use your own master node name; here it is k8s-node1):
kubectl taint nodes k8s-node1 node-role.kubernetes.io/master=:NoSchedule
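As an optional sanity check that the default StorageClass really provisions volumes, here is a minimal sketch (the PVC/pod names and the 1Gi size are arbitrary examples, not part of the original steps). Because openebs-hostpath uses WaitForFirstConsumer, the PVC only becomes Bound once the pod that uses it is scheduled:
cat > pvc-test.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-hostpath-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pvc-test-pod
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - mountPath: /data
      name: demo-vol
  volumes:
  - name: demo-vol
    persistentVolumeClaim:
      claimName: local-hostpath-pvc
EOF
kubectl apply -f pvc-test.yaml
kubectl get pvc local-hostpath-pvc   # Pending until the pod is scheduled, then Bound
kubectl delete -f pvc-test.yaml      # clean up when done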