Note: this lab is demonstrated on a Linux system.
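Installing kubectl from yum assumes a Kubernetes package repository is already configured on the host. A minimal sketch of such a repo file, assuming the community pkgs.k8s.io repository and the v1.28 branch to match the client version shown below:
$ cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
EOF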
$ sudo yum install -y kubectl
$ kubectl version --client
Client Version: v1.28.2
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
# For AMD64 / x86_64
[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-$(uname)-amd64
Note: downloading this binary may require a proxy, depending on your network environment.
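On an ARM64 host, the equivalent download from the same v0.20.0 release would be (the arm64 asset name is assumed to follow the same URL pattern as above):
# For ARM64 / aarch64
[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-$(uname)-arm64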
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind
$ kind version
kind v0.20.0 go1.20.4 linux/amd64
To use kind, Docker must be installed first.
# Install the yum-utils package (it provides the yum-config-manager utility)
$ sudo yum install -y yum-utils
# Add the Docker repository
$ sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker Engine, containerd, and the Docker Compose plugin
$ sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Start the Docker service and enable it at boot
$ sudo systemctl start docker && sudo systemctl enable docker
$ systemctl status docker
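The demo here runs as root, but if kind will be run as a regular user, that user needs permission to talk to the Docker daemon. A common optional step (log out and back in afterwards for the group change to take effect):
$ sudo usermod -aG docker $USER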
# Check the Docker version information
$ docker version
Client: Docker Engine - Community
 Version:           24.0.7
 API version:       1.43
 Go version:        go1.20.10
 Git commit:        afdd53b
 Built:             Thu Oct 26 09:11:35 2023
 OS/Arch:           linux/amd64
 Context:           default
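Before creating any clusters, it is worth confirming the daemon can actually run containers; a quick smoke test:
$ docker run --rm hello-world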
1️⃣ # Deploy the first kind cluster (default name: kind)
[root@Kind ~]# kind create cluster
Creating cluster "kind" ...
✓ Ensuring node image (kindest/node:v1.27.3)
✓ Preparing nodes
✓ Writing configuration
✓ Starting control-plane
✓ Installing CNI
✓ Installing StorageClass
Set kubectl context to "kind-kind"
You can now use your cluster with:
kubectl cluster-info --context kind-kind
Not sure what to do next? Check out https://kind.sigs.k8s.io/docs/user/quick-start/
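Since kind runs each Kubernetes node as a Docker container, the new control-plane node shows up as an ordinary container (kind-control-plane is kind's default name for the default cluster):
$ docker ps --filter name=kind-control-plane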
2️⃣ # Deploy a second cluster, kind-02
[root@Kind ~]# kind create cluster --name kind-02
Creating cluster "kind-02" ...
✓ Ensuring node image (kindest/node:v1.27.3)
✓ Preparing nodes
✓ Writing configuration
✓ Starting control-plane
✓ Installing CNI
✓ Installing StorageClass
Set kubectl context to "kind-kind-02"
You can now use your cluster with:
kubectl cluster-info --context kind-kind-02
Thanks for using kind!
[root@Kind ~]# kubectl get nodes
NAME                 STATUS   ROLES           AGE     VERSION
kind-control-plane   Ready    control-plane   4h36m   v1.27.3
[root@Kind ~]# kind get clusters
kind
kind-02
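Each cluster kind creates is also written into the kubeconfig as its own context (kind-<cluster-name>), so the clusters can be listed from the kubectl side as well:
$ kubectl config get-contexts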
Check the cluster's component Pods.
[root@Kind ~]# kubectl get pods -A
NAMESPACE            NAME                                             READY   STATUS    RESTARTS   AGE
kube-system          coredns-5d78c9869d-66788                         1/1     Running   0          11s
kube-system          coredns-5d78c9869d-snf8b                         1/1     Running   0          11s
kube-system          etcd-kind-01-control-plane                       1/1     Running   0          24s
kube-system          kindnet-ljmm9                                    1/1     Running   0          11s
kube-system          kube-apiserver-kind-01-control-plane             1/1     Running   0          26s
kube-system          kube-controller-manager-kind-01-control-plane    1/1     Running   0          24s
kube-system          kube-proxy-g4z7c                                 1/1     Running   0          11s
kube-system          kube-scheduler-kind-01-control-plane             1/1     Running   0          24s
local-path-storage   local-path-provisioner-6bc4bddd6b-hg48b          1/1     Running   0          11s
[root@Kind ~]# kubectl config use-context kind-kind
Switched to context "kind-kind".
[root@Kind ~]# kubectl get nodes
NAME                 STATUS   ROLES           AGE     VERSION
kind-control-plane   Ready    control-plane   5h49m   v1.27.3
[root@Kind ~]# kubectl config use-context kind-kind-02
Switched to context "kind-kind-02".
[root@Kind ~]# kubectl get nodes
NAME                    STATUS   ROLES           AGE   VERSION
kind-02-control-plane   Ready    control-plane   69m   v1.27.3
A specific Kubernetes version can be selected by passing the corresponding node image with --image:
kind create cluster --name kind-xyb --image kindest/node:v1.25.3
[root@Kind ~]# kind create cluster --name kind-xyb --image kindest/node:v1.25.3
Creating cluster "kind-xyb" ...
✓ Ensuring node image (kindest/node:v1.25.3)
✓ Preparing nodes
✓ Writing configuration
✓ Starting control-plane
✓ Installing CNI
✓ Installing StorageClass
Set kubectl context to "kind-kind-xyb"
You can now use your cluster with:
kubectl cluster-info --context kind-kind-xyb
Not sure what to do next? Check out https://kind.sigs.k8s.io/docs/user/quick-start/
[root@Kind ~]# kubectl config use-context kind-kind-xyb
Switched to context "kind-kind-xyb".
[root@Kind ~]# kubectl get nodes -A
NAME                     STATUS   ROLES           AGE     VERSION
kind-xyb-control-plane   Ready    control-plane   9m15s   v1.25.3
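If a cluster's context ever goes missing from the kubeconfig (for example after editing ~/.kube/config by hand), kind can write it back out for that cluster:
$ kind export kubeconfig --name kind-xyb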
[root@Kind ~]# kind get clusters
kind
kind-02
[root@Kind ~]# kind delete cluster --name kind-02
Deleting cluster "kind-02" ...
Deleted nodes: ["kind-02-control-plane"]
[root@Kind ~]# kind get clusters
kind
[root@Kind ~]# kind delete cluster --name kind
Deleting cluster "kind" ...
Deleted nodes: ["kind-control-plane"]
[root@Kind ~]# kind get clusters
No kind clusters found.
[root@Kind ~]#
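Instead of deleting clusters one by one as above, recent kind releases also provide a plural delete subcommand that removes every cluster in one step (confirm with kind delete --help on your version that it is available):
$ kind delete clusters --all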
[root@Kind ~]# kind -h
kind creates and manages local Kubernetes clusters using Docker container 'nodes'

Usage:
  kind [command]

Available Commands:
  build       Build one of [node-image]
  completion  Output shell completion code for the specified shell (bash, zsh or fish)
  create      Creates one of [cluster]
  delete      Deletes one of [cluster]
  export      Exports one of [kubeconfig, logs]
  get         Gets one of [clusters, nodes, kubeconfig]
  help        Help about any command
  load        Loads images into nodes
  version     Prints the kind CLI version

Flags:
  -h, --help              help for kind
      --loglevel string   DEPRECATED: see -v instead
  -q, --quiet             silence all stderr output
  -v, --verbosity int32   info log verbosity, higher value produces more output
      --version           version for kind

Use "kind [command] --help" for more information about a command.
[root@Kind ~]# kind create cluster -h
Creates a local Kubernetes cluster using Docker container 'nodes'

Usage:
  kind create cluster [flags]

Flags:
      --config string       path to a kind config file
  -h, --help                help for cluster
      --image string        node docker image to use for booting the cluster
      --kubeconfig string   sets kubeconfig path instead of $KUBECONFIG or $HOME/.kube/config
  -n, --name string         cluster name, overrides KIND_CLUSTER_NAME, config (default kind)
      --retain              retain nodes for debugging when cluster creation fails
      --wait duration       wait for control plane node to be ready (default 0s)

Global Flags:
      --loglevel string   DEPRECATED: see -v instead
  -q, --quiet             silence all stderr output
  -v, --verbosity int32   info log verbosity, higher value produces more output
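The --wait flag is particularly useful in scripts and CI: kind create cluster normally returns as soon as the node containers are up, but with --wait it blocks until the control plane reports Ready. For example (the cluster name is arbitrary):
$ kind create cluster --name ci-test --wait 120s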
# config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: xybdiy
nodes:
- role: control-plane
- role: worker
- role: worker
This configuration file defines a cluster with three nodes in total: one control-plane node and two worker nodes. When creating the cluster, simply point to the file with the --config flag:
kind create cluster --config config.yaml
[root@Kind ~]# kubectl create cluster --config config.yaml
error: unknown flag: --config
See 'kubectl create --help' for usage.
(The error above comes from accidentally typing kubectl instead of kind; the correct command follows.)
[root@Kind ~]# kind create cluster --config config.yaml
Creating cluster "xybdiy" ...
✓ Ensuring node image (kindest/node:v1.27.3)
✓ Preparing nodes
✓ Writing configuration
✓ Starting control-plane
✓ Installing CNI
✓ Installing StorageClass
✓ Joining worker nodes
Set kubectl context to "kind-xybdiy"
You can now use your cluster with:
kubectl cluster-info --context kind-xybdiy
Have a nice day!
[root@Kind ~]# kubectl cluster-info --context kind-xybdiy
Kubernetes control plane is running at https://127.0.0.1:45025
CoreDNS is running at https://127.0.0.1:45025/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@Kind ~]# kind get clusters
xybdiy
[root@Kind ~]# kind get nodes -A
xybdiy-worker
xybdiy-worker2
xybdiy-control-plane
[root@Kind ~]# kind get clusters
xybdiy
[root@Kind ~]# kubectl get pods -A
NAMESPACE            NAME                                            READY   STATUS    RESTARTS   AGE
kube-system          coredns-5d78c9869d-5hxw8                        1/1     Running   0          11m
kube-system          coredns-5d78c9869d-ttwsq                        1/1     Running   0          11m
kube-system          etcd-xybdiy-control-plane                       1/1     Running   0          12m
kube-system          kindnet-9ss5b                                   1/1     Running   0          11m
kube-system          kindnet-h875t                                   1/1     Running   0          11m
kube-system          kindnet-xqxsj                                   1/1     Running   0          11m
kube-system          kube-apiserver-xybdiy-control-plane             1/1     Running   0          12m
kube-system          kube-controller-manager-xybdiy-control-plane    1/1     Running   0          12m
kube-system          kube-proxy-28sv8                                1/1     Running   0          11m
kube-system          kube-proxy-bb5z7                                1/1     Running   0          11m
kube-system          kube-proxy-cwd2k                                1/1     Running   0          11m
kube-system          kube-scheduler-xybdiy-control-plane             1/1     Running   0          12m
local-path-storage   local-path-provisioner-6bc4bddd6b-t6b9l         1/1     Running   0          11m
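The same config format accepts further per-node settings. For example, a node image can be pinned and a host port mapped into a node using fields from the kind.x-k8s.io/v1alpha4 API; the cluster name, file name, and port numbers below are just illustrative:
# config-ports.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: xybdiy-ports
nodes:
- role: control-plane
  image: kindest/node:v1.27.3
  extraPortMappings:
  - containerPort: 30080
    hostPort: 8080
    protocol: TCP
- role: worker
kind create cluster --config config-ports.yaml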
1️⃣https://github.com/kubernetes-sigs/kind/releases
2️⃣https://github.com/kubernetes-sigs/kind/