The cluster is bootstrapped with two kubeadm commands: kubeadm init on the master node and kubeadm join on each worker node.
$ kubeadm init
$ kubeadm join
Vagrant.configure("2") do |config|
  (1..3).each do |i|
    config.vm.define "k8s-node#{i}" do |node|
      # Base box for the VM
      node.vm.box = "centos/7"
      # Hostname of the VM
      node.vm.hostname = "k8s-node#{i}"
      # Private-network IP of the VM
      node.vm.network "private_network", ip: "192.168.56.#{99+i}", netmask: "255.255.255.0"
      # Shared folder between host and VM (optional)
      # node.vm.synced_folder "~/Documents/vagrant/share", "/home/vagrant/share"
      # VirtualBox-specific settings
      node.vm.provider "virtualbox" do |v|
        # Name of the VM (choose your own if you like)
        v.name = "k8s-node#{i}"
        # Memory size
        v.memory = 4096
        # Number of CPUs
        v.cpus = 4
      end
    end
  end
end
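With this Vagrantfile in place, the three VMs are created from the directory that contains it (standard Vagrant usage, not a step specific to this guide):
vagrant up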
#Enter the node (replace k8s-node1 with the name of the node you created)
vagrant ssh k8s-node1
su root    #the password is vagrant
vi /etc/ssh/sshd_config
#Change the following setting to yes to allow password-based SSH login
PasswordAuthentication yes
#Restart the sshd service
service sshd restart
#Stop and disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
#Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
#Turn off swap for the current session
swapoff -a
#Turn off swap permanently
sed -ri 's/.*swap.*/#&/' /etc/fstab
#Map hostnames to IPs (replace these with the IPs of your own VMs)
vi /etc/hosts
10.0.2.15 k8s-node1
10.0.2.4 k8s-node2
10.0.2.5 k8s-node3
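If you are unsure which address each VM uses, list its network interfaces on that VM; on these Vagrant boxes the NAT adapter and the host-only adapter are usually eth0 and eth1, but the names can vary:
ip addr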
#Pass bridged IPv4 traffic to the iptables chains:
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
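To read the values back and confirm they took effect (this assumes the br_netfilter module is loaded; otherwise the keys do not exist yet):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables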
#After the settings are in place, check that each machine can be reached by hostname
ping k8s-node2
Kubernetes uses Docker as the default CRI (container runtime), so install Docker first. All three machines need it.
#Remove old Docker versions, if any
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
#Install the required dependencies
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
#Add the Docker yum repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#Install docker-ce and docker-ce-cli
yum install -y docker-ce docker-ce-cli containerd.io
#Configure a Docker registry mirror (you can use the accelerator address from your own Aliyun Container Registry instead)
mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://f2j2uztg.mirror.aliyuncs.com"]
}
EOF
#Start docker and enable it at boot
sudo systemctl daemon-reload
sudo systemctl restart docker
systemctl enable docker
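As an optional sanity check, docker info lists the configured registry mirrors, so the accelerator setting can be verified like this:
sudo docker info | grep -A 1 -i 'Registry Mirrors'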
#Add the Aliyun Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Install kubelet, kubeadm and kubectl
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3
#Enable the service at boot and start it
systemctl enable kubelet
systemctl start kubelet
#Check the kubelet status; at this point it is not running yet because some configuration is still missing
systemctl status kubelet
#Check the kubelet version
kubelet --version
Initialize the master node
#Create a working directory (omitted) and create master_images.sh with the following content:
vi master_images.sh
#!/bin/bash
images=(
kube-apiserver:v1.17.3
kube-proxy:v1.17.3
kube-controller-manager:v1.17.3
kube-scheduler:v1.17.3
coredns:1.6.5
etcd:3.4.3-0
pause:3.1
)
for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
done
#Make the script executable for the current user
chmod 700 master_images.sh
#Run the script
./master_images.sh
#List the downloaded images
docker images
#When init finishes it prints the follow-up steps; do not clear the screen, because the join command is needed later to attach the worker nodes to the master
$ kubeadm init \
--apiserver-advertise-address=10.0.2.15 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.17.3 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16
Test kubectl (run on the master node; worker nodes do not need this)
The master is currently in NotReady state; it becomes Ready once the network add-on is installed.
#List all nodes
kubectl get nodes
#Check the kubelet logs
journalctl -u kubelet
Run the commands suggested at the end of the kubeadm init output:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Install the Pod network add-on on the master node
#Apply the flannel manifest directly from GitHub (not recommended; the address is often unreachable)
kubectl apply -f \
https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#Since the address above may be blocked, you can instead apply a locally downloaded kube-flannel.yml, for example:
kubectl apply -f kube-flannel.yml
The content of kube-flannel.yml is shown below; upload it to the master VM.
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
If the network add-on fails to install and you do not see the results below, you can delete the current network add-on with the command below, or check the logs and troubleshoot from the error messages. (Author's note: my master node stayed in NotReady; by checking the logs I found that the kube-flannel configuration had not installed successfully. Deleting the network add-on and switching to the kube-flannel.yml version above fixed it.)
#Delete the network add-on
kubectl delete -f kube-flannel.yml
#Follow the logs to troubleshoot
journalctl -u kubelet -f
Wait about 3 minutes for the add-on to come up.
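The DaemonSet in the manifest above runs in the kube-flannel namespace, so its pods can be watched while they start:
kubectl get pods -n kube-flannel -o wide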
#List the namespaces
kubectl get ns
NAME STATUS AGE
default Active 30m
kube-node-lease Active 30m
kube-public Active 30m
kube-system Active 30m
#List the pods in a specific namespace
kubectl get pods -n kube-system
#List the pods in all namespaces
kubectl get pods --all-namespaces
Check the node status on the master:
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-node1 Ready master 34m v1.17.3 #the status must be Ready before running the commands below
Finally, on the other two VMs, k8s-node2 and k8s-node3, run the join command printed by kubeadm init:
kubeadm join 10.0.2.15:6443 --token c2wwkc.y0igkd4d406m0dpo \
--discovery-token-ca-cert-hash sha256:355cdbc0daa969295e7de43c20f623818f61367530b0ee632b26d85dfd7d182b
If the token above has expired, generate a new one on the master and substitute the new token and the discovery-token-ca-cert-hash sha256 value.
kubeadm token create --print-join-command
#Create a token that never expires
kubeadm token create --ttl 0 --print-join-command
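Should you ever need to recompute the --discovery-token-ca-cert-hash value by hand instead of using --print-join-command, the approach documented for kubeadm is to hash the cluster CA certificate's public key on the master:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'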
Check the status of all nodes on the master. When every node is Ready, the cluster has been set up successfully.
#Watch the pod progress
watch kubectl get pod -n kube-system -o wide
#Wait 3-10 minutes; once everything is Running, check the status with kubectl get nodes
kubectl get nodes
#Deploy a Tomcat instance from the master node
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8
#List all resources
kubectl get all
#kubectl get pods -o wide shows the tomcat deployment details; here you can see it was scheduled onto k8s-node3
kubectl get pods -o wide
#Expose the container port through a Service: --port is the Service port, --target-port is the container port inside the pod
kubectl expose deployment tomcat6 --port=80 --target-port=8080 --type=NodePort
#List the services
kubectl get svc
#Show the service details, including the node port that maps to the pod
kubectl get svc -o wide
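Once the NodePort service exists, Tomcat can be reached through that port on any node. A quick check (substitute a node IP and the 3xxxx port shown in the PORT(S) column of the service output):
curl http://<node-ip>:<node-port>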
#List the deployments
kubectl get deployment
#Scale the deployment dynamically, increasing the number of running pods
kubectl scale --replicas=3 deployment tomcat6
#Check the pods again after scaling
kubectl get pods -o wide
#Check the service details; there is still only one service even though there are now several pods
kubectl get svc -o wide
#List all resources
kubectl get all
#Delete deployment.apps/tomcat6
kubectl delete deployment.apps/tomcat6
#Check the remaining resources
kubectl get all
#Delete service/tomcat6
kubectl delete service/tomcat6
#Check the remaining resources
kubectl get all
#Show the help for the create deployment command
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --help
#Dry-run to generate the YAML without actually creating or running the deployment
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --dry-run -o yaml
#Write the generated YAML to a file
kubectl create deployment tomcat6 --image=tomcat:6.0.53-jre8 --dry-run -o yaml > tomcat6.yaml
#Edit the YAML file (its content is shown as an image in the original post), then apply the file to create or update the deployment
kubectl apply -f tomcat6.yaml
#Check the pod details again
kubectl get pods -o wide
#Save the expose command as a YAML file as well
kubectl expose deployment tomcat6 --port=80 --target-port=8080 --type=NodePort --dry-run -o yaml > tomcat6-service.yaml
#The file content is shown as an image in the original post; apply it to create the service
kubectl apply -f tomcat6-service.yaml
#View all resources
kubectl get all
#Dump one of the pods as YAML; tomcat6-5f7ccf4cb9-fpf6h is the pod's unique name
kubectl get pod tomcat6-5f7ccf4cb9-fpf6h -o yaml
#Edit tomcat-pod.yaml (shown as an image in the original post), then apply it and check the result
kubectl apply -f tomcat-pod.yaml

Next, install the nginx ingress controller. Create ingress-controller.yaml with the following content:
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "-"
# Here: "-"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
hostNetwork: true
serviceAccountName: nginx-ingress-serviceaccount
containers:
- name: nginx-ingress-controller
image: siriuszg/nginx-ingress-controller:0.20.0
args:
- /nginx-ingress-controller
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 33
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx
namespace: ingress-nginx
spec:
#type: NodePort
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
- name: https
port: 443
targetPort: 443
protocol: TCP
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
#Create the ingress-controller.yaml file with the content above
vi ingress-controller.yaml
#Apply it
kubectl apply -f ingress-controller.yaml
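With the controller running, an Ingress rule can route a hostname to the tomcat6 service created earlier. A minimal sketch, assuming that service still exists: tomcat6.example.com is a placeholder hostname (point it at any node IP, for example via your local hosts file), ingress-tomcat6.yaml is an arbitrary file name, and the extensions/v1beta1 API is used because it matches Kubernetes 1.17.
cat > ingress-tomcat6.yaml << EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web
spec:
  rules:
  - host: tomcat6.example.com
    http:
      paths:
      - backend:
          serviceName: tomcat6
          servicePort: 80
EOF
kubectl apply -f ingress-tomcat6.yaml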
Install a Helm version that matches your Kubernetes version; Helm 2.16.2 is used here. You can download the archive (link in the original post) and upload it to the VM directly.
#Download with wget (can be slow)
wget https://get.helm.sh/helm-v2.16.2-linux-amd64.tar.gz
Extract the archive and move the binary.
#Extract into a target directory (create it first with mkdir -p /mydata/helm)
tar -zxvf helm-v2.16.2-linux-amd64.tar.gz -C /mydata/helm
#Enter the extraction directory
cd /mydata/helm
#Move the binary into the bin directory
sudo mv linux-amd64/helm /usr/local/bin/helm
#Check that helm is installed; if it prints its usage information, the installation succeeded
helm
Create RBAC permissions for Tiller (run on the master)
#Create a YAML file with the content below
vi helm-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
#Apply it
[root@k8s-node1 k8s]# kubectl apply -f helm-rbac.yaml
serviceaccount/tiller created
clusterrolebinding.rbac.authorization.k8s.io/tiller created
helm repo list
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
#Initialization takes a while to complete; --tiller-image pins an image that is reachable (the default one is blocked). Wait for the tiller pod deployed on the node to become ready.
helm init --service-account=tiller --tiller-image=registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.16.3 --history-max 300
#Check the version
helm version
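helm init deploys Tiller into the kube-system namespace, so its pod can be checked directly:
kubectl get pods -n kube-system | grep tiller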
Prerequisites (follow the official installation documentation step by step).
Check the nodes, confirm whether the master node has a Taint, and remove it.
#Check the nodes
kubectl get node -o wide
#Confirm whether the master node has a Taint (replace master with your master node name, e.g. k8s-node1)
kubectl describe node master | grep Taint
#Remove the Taint from the master node (it will be added back later, right before installing KubeSphere)
kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-
Install OpenEBS
#Create the namespace
kubectl create ns openebs
#Switch the stable chart repository to a mirror
helm repo add stable http://mirror.azure.cn/kubernetes/charts
#Install
helm install --namespace openebs --name openebs stable/openebs --version 1.5.0
#List all namespaces
kubectl get ns
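The OpenEBS components run in the openebs namespace created above, so their startup can be checked with:
kubectl get pods -n openebs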
#If the installation fails, you can remove the release from helm; first list all releases
helm ls --all
#Delete a helm release (replace xxx with the release name)
helm del --purge xxx
4. View the StorageClasses that were created and set the default StorageClass
#Installing OpenEBS automatically creates 4 StorageClasses; list them:
kubectl get sc
#Set openebs-hostpath as the default StorageClass
kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
5. Add the Taint back to the master node so that business workloads are not scheduled onto it and do not compete for master resources (again, replace master with your master node name)
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
Minimal installation of KubeSphere. Do not use the installation method on the official site; that link is no longer valid.
#Create the following file on Linux with the content below
vim kubesphere-minimal.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: kubesphere-system
---
apiVersion: v1
data:
ks-config.yaml: |
---
persistence:
storageClass: ""
etcd:
#whether to enable etcd monitoring; set to True to enable it
monitoring: False
endpointIps: 192.168.0.7,192.168.0.8,192.168.0.9
port: 2379
tlsEnable: True
common:
mysqlVolumeSize: 20Gi
minioVolumeSize: 20Gi
etcdVolumeSize: 20Gi
openldapVolumeSize: 2Gi
redisVolumSize: 2Gi
metrics_server:
enabled: False
console:
enableMultiLogin: False # enable/disable multi login
port: 30880
monitoring:
prometheusReplicas: 1
prometheusMemoryRequest: 400Mi
prometheusVolumeSize: 20Gi
grafana:
enabled: False
logging:
#whether to enable the logging feature; set to True to enable it
enabled: False
elasticsearchMasterReplicas: 1
elasticsearchDataReplicas: 1
logsidecarReplicas: 2
elasticsearchMasterVolumeSize: 4Gi
elasticsearchDataVolumeSize: 20Gi
logMaxAge: 7
elkPrefix: logstash
containersLogMountedPath: ""
kibana:
enabled: False
openpitrix:
enabled: False
devops:
#whether to enable the DevOps feature; it will be set to true later
enabled: False
jenkinsMemoryLim: 2Gi
jenkinsMemoryReq: 1500Mi
jenkinsVolumeSize: 8Gi
jenkinsJavaOpts_Xms: 512m
jenkinsJavaOpts_Xmx: 512m
jenkinsJavaOpts_MaxRAM: 2g
sonarqube:
enabled: False
postgresqlVolumeSize: 8Gi
servicemesh:
enabled: False
notification:
enabled: False
alerting:
enabled: False
kind: ConfigMap
metadata:
name: ks-installer
namespace: kubesphere-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ks-installer
namespace: kubesphere-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: ks-installer
rules:
- apiGroups:
- ""
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apps
resources:
- '*'
verbs:
- '*'
- apiGroups:
- extensions
resources:
- '*'
verbs:
- '*'
- apiGroups:
- batch
resources:
- '*'
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apiregistration.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- tenant.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- certificates.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- devops.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- '*'
verbs:
- '*'
- apiGroups:
- logging.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- jaegertracing.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- admissionregistration.k8s.io
resources:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ks-installer
subjects:
- kind: ServiceAccount
name: ks-installer
namespace: kubesphere-system
roleRef:
kind: ClusterRole
name: ks-installer
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ks-installer
namespace: kubesphere-system
labels:
app: ks-install
spec:
replicas: 1
selector:
matchLabels:
app: ks-install
template:
metadata:
labels:
app: ks-install
spec:
serviceAccountName: ks-installer
containers:
- name: installer
image: kubesphere/ks-installer:v2.1.1
imagePullPolicy: "Always"
Apply it:
kubectl apply -f kubesphere-minimal.yaml
Verify and access: follow the scrolling installation log and wait patiently (this takes a while) until it reports success.
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
If the log reports errors, search for the error messages to resolve them.
How to restart the installation: if you run into a problem during installation, fix it and then restart the installation task by restarting the ks-installer Pod; deleting the Pod is enough, as it is recreated automatically:
#The pod is recreated automatically after deletion; ks-installer-xxxxxx-xxxxx is the pod name
kubectl delete pod ks-installer-xxxxxx-xxxxx -n kubesphere-system
Use kubectl get pod --all-namespaces to check that every Pod in the KubeSphere-related namespaces is Running. Once they are, the KubeSphere UI is available at IP:30880; the default cluster administrator account is admin/P@88w0rd.
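To look up the console NodePort explicitly, the web UI in KubeSphere 2.1 is served by the ks-console service in the kubesphere-system namespace (treat the service name as an assumption if your version differs):
kubectl get svc ks-console -n kubesphere-system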
Enabling pluggable components after installation
#Edit the ks-installer ConfigMap as shown below; other components can be enabled the same way as needed, see the official documentation for details
kubectl edit cm -n kubesphere-system ks-installer
devops:
enabled: True
jenkinsMemoryLim: 2Gi
jenkinsMemoryReq: 1500Mi
jenkinsVolumeSize: 8Gi
jenkinsJavaOpts_Xms: 512m
jenkinsJavaOpts_Xmx: 512m
jenkinsJavaOpts_MaxRAM: 2g
sonarqube:
enabled: True
Save and exit. To verify that the pluggable components installed correctly, there is no need to run the installation command ./install.sh again; simply check the ks-installer logs or the Pod status, as shown below.
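The verification uses the same installer-log command as before:
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f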
#Show a pod's events; if a pod is not in the Running state, use this to troubleshoot (the namespace defaults to default)
kubectl describe po <pod-name> -n <namespace>
#Delete a pod
kubectl delete pod <pod-name>
#View container logs
kubectl logs <pod-name> [-c <container-name>]
#Check whether a Pod's configuration is correct
kubectl get pod <pod-name> -o yaml
#View a Pod's events
kubectl describe pod <pod-name>
#Get everything in the kube-system namespace
kubectl get all -n kube-system
#Delete a deployment in another namespace
kubectl delete -n kube-system deployment.apps/kubernetes-dashboard
#Edit a deployment's YAML; after saving and exiting, it is redeployed automatically
kubectl edit deploy ks-account -n kubesphere-system
#Edit a ConfigMap's YAML; after saving and exiting, the change takes effect automatically
kubectl edit cm -n kubesphere-system ks-installer