Host IP and hostname plan
192.168.186.128 k8s-master01
192.168.186.129 k8s-node01
192.168.186.130 k8s-node02
#Permanently set the hostname
hostnamectl set-hostname k8s-master01 && bash #run on master01
hostnamectl set-hostname k8s-node01 && bash #run on node01
hostnamectl set-hostname k8s-node02 && bash #run on node02
#Time synchronization:
yum install ntpdate -y
ntpdate time.windows.com
systemctl stop firewalld #turn off the firewall
systemctl disable firewalld
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent (takes effect after reboot)
setenforce 0 # temporary
swapoff -a # temporary
sed -i 's/.*swap.*/#&/' /etc/fstab # permanent
#Adjust kernel parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter #load the br_netfilter module
lsmod |grep br_netfilter #verify the module loaded successfully
sysctl -p /etc/sysctl.d/k8s.conf #apply the kernel parameters set above
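Note that modprobe does not persist across reboots; to have br_netfilter loaded automatically at boot, you can additionally register it with systemd-modules-load (a small addition, not part of the original steps):
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF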
cat >> /etc/hosts << EOF
192.168.186.128 k8s-master01
192.168.186.129 k8s-node01
192.168.186.130 k8s-node02
EOF
#Run on all machines
ssh-keygen -t rsa #press Enter at every prompt; leave the passphrase empty
###Install the local SSH public key into the matching account on each remote host
for i in k8s-master01 k8s-node01 k8s-node02 ;do ssh-copy-id -i ~/.ssh/id_rsa.pub $i ;done
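A quick check that passwordless SSH now works from this machine to every host:
for i in k8s-master01 k8s-node01 k8s-node02 ;do ssh $i hostname ;done #should print each hostname without prompting for a password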
In Kubernetes, a Service can be proxied by one of two models: iptables or ipvs. Of the two, ipvs performs better; to use the ipvs model, the ipvs kernel modules must be loaded manually.
yum -y install ipset ipvsadm
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules # make the script executable
/etc/sysconfig/modules/ipvs.modules # run the script
#Verify the ipvs modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
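Loading the modules only makes ipvs available; kube-proxy itself still defaults to iptables. A sketch of switching it over, to run after the cluster has been initialized (kubeadm stores kube-proxy settings in a ConfigMap):
kubectl -n kube-system edit configmap kube-proxy #set mode: "ipvs" in the proxy configuration
kubectl -n kube-system delete pods -l k8s-app=kube-proxy #recreate the kube-proxy pods so they pick up the change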
Docker download source (the official repo via the Aliyun mirror):
yum -y install epel-release wget
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
#yum list docker-ce --showduplicates | sort -r #list the docker versions available in the yum repo
#yum -y install docker-ce-18.06.1.ce-3.el7 #install a pinned version
yum -y install docker-ce #install the latest docker
systemctl enable docker && systemctl start docker
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl status docker
docker --version
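Since daemon.json sets native.cgroupdriver=systemd, confirm the driver actually took effect:
docker info | grep -i 'cgroup driver' #should print: Cgroup Driver: systemd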
#Install cri-dockerd (the CRI adapter that lets Kubernetes drive Docker)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1-3.el7.x86_64.rpm
rpm -ivh cri-dockerd-0.3.1-3.el7.x86_64.rpm
#Back up and replace the cri-docker.service file
mv /usr/lib/systemd/system/cri-docker.service /usr/lib/systemd/system/cri-docker.service.default
#Write the following into the new file ('EOF' is quoted so the shell does not expand $MAINPID inside the unit)
cat > /usr/lib/systemd/system/cri-docker.service << 'EOF'
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
#Start cri-dockerd
systemctl daemon-reload
systemctl start cri-docker.service
systemctl enable cri-docker.service
systemctl status cri-docker.service
cri-dockerd --version
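The kubeadm commands below reference the CRI socket by path, so verify it exists:
ls -l /var/run/cri-dockerd.sock #created by cri-docker.socket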
##Add the Kubernetes yum repo
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum makecache -y
#yum makecache fast
yum list kubectl --showduplicates | sort -r #list the available kubectl versions
##Install on CentOS
yum -y install kubelet-1.27.4-0 kubeadm-1.27.4-0 kubectl-1.27.4-0
systemctl enable kubelet
#Install on Ubuntu
sudo apt install kubelet=1.27.4-00 kubeadm=1.27.4-00 kubectl=1.27.4-00 -y #pin the version; the latest release had image-pull problems
sudo apt-mark hold kubelet kubeadm kubectl
sudo systemctl enable kubelet.service
sudo systemctl restart kubelet.service
#kubeadm config images list #list the images the cluster installation needs
kubeadm init --kubernetes-version v1.27.4 --apiserver-advertise-address=192.168.186.128 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16 --image-repository registry.aliyuncs.com/google_containers --cri-socket unix:///var/run/cri-dockerd.sock --ignore-preflight-errors=all
#Copy and save the full output of the command above; it is needed when joining nodes to the cluster
#kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock #roll back a failed init before retrying
When running kubeadm init, remember to include the --cri-socket parameter shown above (kubeadm reset requires the same parameter).
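The init output also prints the standard kubectl setup; run it on the master before using kubectl (these are the stock commands from the kubeadm init output):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config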
kubeadm token create --print-join-command #regenerate the join token
#Run on additional master nodes
kubeadm join 192.168.186.128:6443 --token vouckc.yk26zmqg852q2y7t --discovery-token-ca-cert-hash sha256:2176b30e8e3bf17934e1323831a55c0b1f319528139d4ad490d2d9a4e0c8f1b6 --control-plane --cri-socket unix:///var/run/cri-dockerd.sock
#Note: --control-plane marks the join as a control-plane (master) node
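Joining an extra control-plane node also requires a --certificate-key value; one way to obtain it (an addition, not shown in the original output) is:
kubeadm init phase upload-certs --upload-certs #prints the certificate key to pass via --certificate-key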
#Run on worker nodes
kubeadm join 192.168.186.128:6443 --token vouckc.yk26zmqg852q2y7t --discovery-token-ca-cert-hash sha256:2176b30e8e3bf17934e1323831a55c0b1f319528139d4ad490d2d9a4e0c8f1b6 --cri-socket unix:///var/run/cri-dockerd.sock
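Back on the master, confirm the joins succeeded; the nodes will stay NotReady until a network plugin is deployed in the next step:
kubectl get nodes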
####CentOS
rpm -aq |grep completion #check whether bash-completion is already installed
yum -y install bash-completion #install the bash completion package
kubectl completion bash #print the completion script (the next steps install it permanently)
source /usr/share/bash-completion/bash_completion
kubectl completion bash >/etc/profile.d/kubectl.sh
source /etc/profile.d/kubectl.sh
cat >> /root/.bashrc <<EOF
source /etc/profile.d/kubectl.sh
EOF
#kubectl --help|grep completion
#kubectl completion --help
#kubectl completion --help|grep source
source <(kubectl completion bash) #enable completion in the current shell
Official docs: https://kubernetes.io/docs/concepts/cluster-administration/addons/
Open the page above and download the YAML manifest for the network add-on you need.
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
wget https://docs.projectcalico.org/archive/v3.20/manifests/calico.yaml
kubectl apply -f kube-flannel.yml #deploy either flannel or calico, not both
kubectl apply -f calico.yaml #deploy either flannel or calico, not both
kubectl get pods -n kube-system
kubectl get pods -n kube-flannel
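Once the flannel or calico pods are Running, the nodes should switch to Ready:
kubectl get nodes #all three nodes should now report Ready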
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pods,svc #check the exposed NodePort
Browser access: http://<any-node-IP>:<NodePort>
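A quick check from the shell (a sketch, assuming the nginx Service created above):
NODEPORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://192.168.186.128:$NODEPORT #any node IP works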
Official site: https://github.com/kubernetes/dashboard/releases
Download the matching file from here; the Dashboard version must match your Kubernetes version.
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
By default the Dashboard is only reachable from inside the cluster; change the Service to type NodePort to expose it externally:
vim recommended.yaml #the section that needs modifying is shown below
... ... ...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #newly added line; note the 2-space indent
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001 #newly added line
  selector:
    k8s-app: kubernetes-dashboard
... ... ...
kubectl apply -f recommended.yaml
kubectl get pods -n kubernetes-dashboard
[root@k8s-master ~]# kubectl get pods,svc -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-7b59f7d4df-znxj6   1/1     Running   0          114s
kubernetes-dashboard-5dbf55bd9d-rzkpw        1/1     Running   0          114s
Create a service account and bind it to the default cluster-admin ClusterRole
kubectl create serviceaccount dashboard-admin -n kube-system #create the user
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin #grant the user cluster-admin
kubectl -n kube-system get serviceaccounts |grep dashboard #verify the service account was created
kubectl -n kube-system create token dashboard-admin --duration=518400s #create the user token
#One year = 365*24*60*60 = 31536000s. If the first token login reports an error, generate the token once more.
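With the Service exposed on nodePort 30001 as configured above, open https://<any-node-IP>:30001 in a browser (for example https://192.168.186.128:30001), accept the self-signed certificate warning, and sign in with the token printed by the command above.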
#Install metrics-server so that kubectl top works; components.yaml is the metrics-server manifest (available from https://github.com/kubernetes-sigs/metrics-server/releases)
#Note: on kubeadm test clusters the metrics-server Deployment often needs the --kubelet-insecure-tls arg added
kubectl apply -f components.yaml
kubectl top nodes
kubectl top pods
[root@k8s-master01 ]# kubectl top nodes
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master01   873m         14%    1077Mi          13%
k8s-node01     422m         7%     712Mi           12%
k8s-node02     303m         7%     606Mi           16%