1. Project Planning
| Host | IP |
|---|---|
| k8s | 192.168.66.57 |
| k8s-node1 | 192.168.66.58 |
| k8s-node2 | 192.168.66.59 |
Docker version: 18.06.1-ce
Kubernetes version: 1.18.0
Installation method: the official tool kubeadm
2. Environment Preparation
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
setenforce 0 # temporary
# Disable swap
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
# Set the hostname on each server according to the plan (k8s / k8s-node1 / k8s-node2)
hostnamectl set-hostname <hostname>
# Add hosts entries on the master
cat >> /etc/hosts << EOF
192.168.66.57 k8s
192.168.66.58 k8s-node1
192.168.66.59 k8s-node2
EOF
# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system # apply the settings
# Synchronize the system time
yum install ntpdate -y
ntpdate time.windows.com
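Optionally, a quick sanity check (these verification commands are a suggestion, not part of the original steps) confirms the changes took effect:
# SELinux should now report Permissive (Disabled only after a reboot)
getenforce
# Swap should show 0 total
free -m | grep -i swap
# Both values should be 1; if the keys are missing, run 'modprobe br_netfilter' first
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables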
3. Install Docker/kubeadm/kubelet (all servers)
01. Install Docker
yum -y install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker
docker --version
Configure a registry mirror:
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://jrr4lsdm.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
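Optionally, verify that the mirror was picked up (the accelerator URL above is an example; substitute your own):
docker info | grep -A 1 "Registry Mirrors"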
02. Add the Kubernetes YUM repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
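Optionally, confirm the repository is reachable and the 1.18.0 packages are visible (a quick check, not in the original steps):
yum list kubelet --showduplicates | grep 1.18.0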
03. Install kubeadm, kubelet, and kubectl
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet
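Optionally, confirm the installed versions:
kubeadm version -o short
kubelet --version
kubectl version --client --short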
4. Deploy the Kubernetes Master Node
Run on the master node (adjust --apiserver-advertise-address to your master's IP; --pod-network-cidr must match the 10.244.0.0/16 network used in the flannel manifest later):
kubeadm init \
--apiserver-advertise-address=192.168.66.57 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
If the init fails, check whether swap and the firewall have really been disabled.
After a successful init, follow the printed instructions:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# To be used on the worker nodes
kubeadm join 192.168.66.57:6443 --token 4rko3o.dd8q6ih3yiheogx5 \
--discovery-token-ca-cert-hash sha256:118ce2d604564ec459757d7c48de40f3e243e0c881b4f54075d678b1f1fe14af
5. Deploy the Worker Nodes
To join the cluster, run the join command that kubeadm init printed on the master (execute on both worker nodes):
kubeadm join 192.168.66.57:6443 --token 4rko3o.dd8q6ih3yiheogx5 \
--discovery-token-ca-cert-hash sha256:118ce2d604564ec459757d7c48de40f3e243e0c881b4f54075d678b1f1fe14af
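The token printed by kubeadm init expires after 24 hours by default. If the join fails with a token error, generate a fresh join command on the master:
kubeadm token create --print-join-command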
Tip (optional): enable kubectl auto-completion (master node)
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
6. Deploy the CNI Network Plugin
Load the network plugin on the master node.
Download the manifest from the official repository:
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
If the URL cannot be reached from the server, download the file to the local machine first, then apply it with:
kubectl apply -f kube-flannel.yml
### Alternatively, create the kube-flannel.yml file manually with the following content:
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "cniVersion": "0.2.0",
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: lizhenliang/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: lizhenliang/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
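The nodes only report Ready once the flannel DaemonSet pods are running on every node. A quick way to check them (assuming the manifest above was applied unchanged):
kubectl get pods -n kube-system -l app=flannel -o wide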
Check the node status (on the master):
[root@k8s ~]# kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
k8s         Ready    master   68m   v1.18.0
k8s-node1   Ready    <none>   35m   v1.18.0
k8s-node2   Ready    <none>   35m   v1.18.0
7. Test the Kubernetes Cluster
Create a pod in the cluster and verify that it runs correctly:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
NAME                        READY   STATUS    RESTARTS   AGE
pod/nginx-f89759699-dchkt   1/1     Running   0          28m

NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP        69m
service/nginx        NodePort    10.99.72.25   <none>        80:31226/TCP   28m
Then access any node's IP:PORT in a browser.
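For example, using the NodePort 31226 shown above (your port will differ) and the IP of k8s-node1 from the planning table, the service can also be checked from the command line:
curl -I http://192.168.66.58:31226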
Core Concepts
Pod
- A Pod is the smallest deployable unit in Kubernetes.
- A Pod can contain one or more containers (a group of containers).
- A Pod is therefore also called a container group.
Controllers: controllers manage Pods (start, stop, delete), for example:
- ReplicaSet
- Deployment
- StatefulSet
- DaemonSet
- Job
- CronJob
Service
- Ties a group of Pods together and provides a single entry point.
- Even if Pod addresses change, this entry point stays the same, so client access is unaffected.
Label
- A group of Pods shares a common label.
- A Service is associated with a group of Pods through labels.
Namespace
- Used to isolate the runtime environment of Pods (by default, Pods can reach each other).
- Typical use cases (a short command sketch follows this list):
  - Provide isolated Pod environments for different tenants/companies.
  - Prepare separate namespaces for development, test, and production environments.
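As a minimal sketch of how labels tie a Service to a set of Pods and how namespaces isolate workloads (the names web and dev below are illustrative, not part of this cluster):
# Create an isolated namespace and run an nginx Deployment inside it
kubectl create namespace dev
kubectl create deployment web --image=nginx -n dev
# kubectl create deployment labels the Pods app=web; expose selects them by that label
kubectl expose deployment web --port=80 -n dev
kubectl get pods,svc -n dev -l app=web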
================
The Service "xxx-svc" is invalid: metadata.name: Invalid value: "xxx-svc": must be no more than 63 characters
The value of metadata.name on a Service resource must not exceed 63 characters and must follow DNS naming rules:
1. Only letters (a-z, case-insensitive), digits (0-9), and hyphens (-) may be used.
2. Hyphens (-) may not appear consecutively, stand alone, or appear at the beginning or end.
3. The name must be no longer than 63 characters.