# If you don't need a different version, download directly
wget https://get.helm.sh/helm-v3.2.4-linux-amd64.tar.gz
# Extract the archive
tar -zxvf helm-v3.2.4-linux-amd64.tar.gz
# Enter the extracted directory
cd linux-amd64/
cp helm /usr/local/bin/
# Grant execute permission
chmod a+x /usr/local/bin/helm
# Check the version
helm version
.
├── Chart.yaml
├── templates
│   ├── deployment.yaml
│   └── service.yaml
└── values.yaml
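For reference, helm can also scaffold an equivalent layout for you (with extra default templates you would then trim down); the steps below build the chart by hand instead.
# Optional: scaffold a chart skeleton instead of creating the files manually
helm create demo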
# 1. Create a demo directory to hold the chart
mkdir demo && cd demo && mkdir templates
# 2. Create Chart.yaml; it must contain at least the name and version fields
cat << EOF > Chart.yaml
name: hello-world
version: 1.0.0
EOF
# 3. Create ./templates/deployment.yaml
# The template directory must be named templates; note the image field can use template syntax so its value is injected dynamically
cat << EOF > ./templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-world
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-world
        image: hub.qnhyn.com/library/myapp:v1
        ports:
        - containerPort: 80
          protocol: TCP
EOF
# 4. Create ./templates/service.yaml
cat << EOF > ./templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: hello-world
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: hello-world
EOF
# 5. Create values.yaml; its values are injected dynamically, e.g. image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
# The benefit: you only need to edit values.yaml to change the configuration dynamically
#cat << EOF > values.yaml
#image:
#  repository: lzw5399/tocgenerator
#  tag: '951'
#EOF
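For reference, this is how the values file and the template tie together (a sketch; the repository and tag are just the illustrative values above):
# In templates/deployment.yaml, the hard-coded image line would become:
#   containers:
#   - name: hello-world
#     image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
# Helm then fills in the repository and tag from values.yaml at render time.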
# Instantiate the chart as a release
# Syntax: helm install [RELEASE-NAME] [CHART-PATH]
helm install testname .
# List releases
helm ls
# View release history
helm history <RELEASE_NAME>
# Installation succeeded!
kubectl get pod
# List deployed releases
helm ls
# Check the status of a specific release
helm status <RELEASE_NAME>
# List releases that were uninstalled but kept their history
helm ls --uninstalled
# Install
helm install <RELEASE-NAME> <CHART-PATH>
# Set values from the command line
helm install --set image.tag=233 <RELEASE-NAME> <CHART-PATH>
# Upgrade; flags are optional
helm upgrade [FLAG] <RELEASE> <CHART-PATH>
# Upgrade with specific values files
helm upgrade -f myvalues.yaml -f override.yaml <RELEASE-NAME> <CHART-PATH>
# Set values from the command line
helm upgrade --set foo=bar --set foo=newbar redis ./redis
# Uninstall a release
helm uninstall <RELEASE_NAME>
# Uninstall a release but keep its history
# View it afterwards with: helm ls --uninstalled
# Roll it back with: helm rollback <RELEASE_NAME> [REVISION]
helm uninstall <RELEASE_NAME> --keep-history
# Roll back to a given revision (list revisions with helm history)
helm rollback <RELEASE> [REVISION]
# Dry run: render the chart without actually installing it
helm install --dry-run <RELEASE-NAME> <CHART-PATH>
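A handy variant is to combine --dry-run with --debug, which prints the fully rendered manifests so they can be inspected before installing:
# Render the chart and show the manifests that would be applied, without installing anything
helm install --dry-run --debug <RELEASE-NAME> <CHART-PATH>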
# Add the official dashboard repo from Helm Hub
helm repo add k8s-dashboard https://kubernetes.github.io/dashboard
# List the configured repos
helm repo list
# Search for the dashboard chart
helm search repo kubernetes-dashboard
# Create a directory to hold the chart
mkdir dashboard-chart && cd dashboard-chart
# Pull the chart
helm pull k8s-dashboard/kubernetes-dashboard
# This downloads a tarball; extract it
tar -zxvf kubernetes-dashboard-2.3.0.tgz
# Enter the extracted directory
cd kubernetes-dashboard
# Install from the local chart source; the yaml does not need changes here
helm install -f values.yaml --namespace kube-system kubernetes-dashboard .
kubectl get pod -n kube-system
kubectl describe pod kubernetes-dashboard-7c4f68b554-h5l62 -n kube-system
kubectl get svc -n kube-system
# Change the svc type to NodePort; it could also be exposed through an Ingress instead
kubectl edit svc kubernetes-dashboard -n kube-system
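If you prefer a non-interactive change, kubectl patch achieves the same thing (a sketch against the same service and namespace as above):
# Equivalent non-interactive change: switch the service type to NodePort
kubectl patch svc kubernetes-dashboard -n kube-system -p '{"spec":{"type":"NodePort"}}'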
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# After the steps above, because the admin account is named dashboard-admin, the generated secret is named dashboard-admin-token-<random string>
kubectl get secret -n=kube-system |grep dashboard-admin-token
kubectl describe -n=kube-system secret dashboard-admin-token-fk95t
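Since the secret name ends in a random suffix, a one-liner that looks it up first can save a step (a sketch using standard shell tools):
# Look up the dashboard-admin token secret and print its details in one go
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-admin-token | awk '{print $1}')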
mkdir -p /root/plugin/Prometheus && cd /root/plugin/Prometheus
git clone https://github.com/prometheus-operator/kube-prometheus.git
cd kube-prometheus/manifests
# Edit grafana-service.yaml to expose Grafana via NodePort
vim grafana-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort  # added
  ports:
  - name: http
    port: 3000
    targetPort: http
    nodePort: 30100  # added
  selector:
    app: grafana
# Edit prometheus-service.yaml and change it to NodePort
apiVersion: v1
kind: Service
metadata:
  labels:
    prometheus: k8s
  name: prometheus-k8s
  namespace: monitoring
spec:
  type: NodePort  # added
  ports:
  - name: web
    port: 9090
    targetPort: web
    nodePort: 30200  # added
  selector:
    app: prometheus
    prometheus: k8s
  sessionAffinity: ClientIP
# Edit alertmanager-service.yaml and change it to NodePort
apiVersion: v1
kind: Service
metadata:
  labels:
    alertmanager: main
  name: alertmanager-main
  namespace: monitoring
spec:
  type: NodePort  # added
  ports:
  - name: web
    port: 9093
    targetPort: web
    nodePort: 30300  # added
  selector:
    alertmanager: main
    app: alertmanager
  sessionAffinity: ClientIP
# Deploy directly (Docker 19.03-ce, Kubernetes 1.15.1 here; 1.18.0 previously hit a readiness-probe error and a Grafana volume-mount error, so get it working first and investigate later)
kubectl create -f manifests/setup
kubectl create -f manifests/
# Check whether all the pods are up
kubectl get pod -n monitoring
NAME                                   READY   STATUS    RESTARTS   AGE
alertmanager-main-0                    2/2     Running   0          103s
alertmanager-main-1                    2/2     Running   0          83s
alertmanager-main-2                    2/2     Running   0          76s
grafana-7dc5f8f9f6-xb5ld               1/1     Running   0          109s
kube-state-metrics-5cbd67455c-rtgtm    4/4     Running   0          83s
node-exporter-9mr8k                    2/2     Running   0          109s
node-exporter-jdmcd                    2/2     Running   0          109s
node-exporter-vk769                    2/2     Running   0          109s
prometheus-adapter-668748ddbd-8lxqx    1/1     Running   0          109s
prometheus-k8s-0                       3/3     Running   1          74s
prometheus-k8s-1                       3/3     Running   1          74s
prometheus-operator-7447bf4dcb-g4rbf   1/1     Running   0          110s
# Test after installation
kubectl top node
kubectl top pod
Access Prometheus. With the configuration above, Prometheus is exposed on NodePort 30200, so open http://MasterIP:30200.
Using PromQL you can, for example, query the per-pod CPU usage rate; Prometheus also plots it over time automatically.
sum by (pod_name)( rate(container_cpu_usage_seconds_total{image!="", pod_name!=""}[1m]))
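A similar query works for memory, for example (a sketch; the metric and label names follow the same older cAdvisor naming as the CPU query above):
sum by (pod_name)( container_memory_usage_bytes{image!="", pod_name!=""} )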
Access Grafana at http://MasterIP:30100; the default username and password are both admin.
Horizontal Pod Autoscaling can automatically scale the number of Pods in a ReplicationController, Deployment, or ReplicaSet based on CPU utilization.
# Install the HPA example image; copy it to the other nodes as well
scp hpa-example.tar root@k8s-node2:~/
docker load -i hpa-example.tar
# Create a php-apache Pod
kubectl run php-apache --image=gcr.io/google_containers/hpa-example --requests=cpu=200m --expose --port=80
# Create the HPA controller: target 50% CPU, scale between 1 and 10 replicas
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
[root@k8s-master manifests]# kubectl get hpa
NAME         REFERENCE               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   0%/50%    1         10        1          52s
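For reference, the kubectl autoscale command above corresponds roughly to the following manifest (a sketch using the autoscaling/v1 API available in this Kubernetes version):
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50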
# Increase the load and watch the replica count grow
kubectl run -i --tty load-generator --image=busybox /bin/sh
# Once inside, hit the service in an endless loop
while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
# In a new terminal
kubectl get hpa
# Scale-down happens more slowly than scale-up, so a sudden scale-down doesn't overwhelm the remaining pods
spec:
  containers:
  - image: xxx
    name: xxxx
    resources:
      limits:     # maximum
        cpu: "4"
        memory: 2Gi
      requests:   # initial request
        cpu: 250m
        memory: 250Mi
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-resources
  namespace: spark-cluster
spec:
  hard:
    pods: "20"
    requests.cpu: "20"
    requests.memory: 100Gi
    limits.cpu: "40"
    limits.memory: 200Gi
apiVersion: v1
kind: ResourceQuota
metadata:
  name: object-counts
  namespace: spark-cluster
spec:
  hard:
    configmaps: "10"
    persistentvolumeclaims: "4"
    replicationcontrollers: "20"
    secrets: "10"
    services: "10"
    services.loadbalancers: "2"
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 50Gi
      cpu: 5
    defaultRequest:
      memory: 1Gi
      cpu: 1
    type: Container
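These objects could be applied and verified as follows (a sketch; the file names are placeholders for wherever the YAML above was saved):
# Create the namespace, apply the quotas/limits, then check usage against the quotas
kubectl create namespace spark-cluster
kubectl apply -f compute-resources.yaml -f object-counts.yaml -f mem-limit-range.yaml
kubectl describe quota -n spark-cluster
kubectl describe limitrange mem-limit-range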
docker load -i elasticsearch-oss.tar
docker load -i fluentd-elasticsearch.tar
docker load -i kibana.tar
# Add the repos (Helm 2.14.1, Kubernetes 1.15.1, Docker 19.03)
cp linux-amd64/helm /usr/local/bin/
vim helm-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
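Presumably the RBAC manifest above is applied before initializing Tiller, so the tiller service account exists:
kubectl apply -f helm-rbac.yaml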
helm init --service-account tiller --skip-refresh
helm init --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.14.1 --service-account=tiller --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo add stable http://mirror.azure.cn/kubernetes/charts/
helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
helm repo update
# The Elasticsearch and Kibana chart versions must match
helm search incubator/elasticsearch -l
helm fetch incubator/elasticsearch --version 1.10.2
helm fetch stable/fluentd-elasticsearch --version 2.0.7
helm fetch stable/kibana --version 0.14.8
# Pull the images first
docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.2
docker pull gcr.io/google-containers/fluentd-elasticsearch:v2.3.2
docker pull docker.elastic.co/kibana/kibana-oss:6.4.2
# Deploy Elasticsearch: edit values.yaml; a 16 GB machine can't handle the defaults, so set the counts below to 1 to reduce overhead
MINIMUM_MASTER_NODES: "1"
client replicas: 1
master replicas: 1 enabled: false
data replicas: 1 enabled: false
# Run from the current directory
kubectl create namespace efk
helm install --name els1 --namespace=efk -f values.yaml .
kubectl get pod -n efk
kubectl run cirror-$RANDOM --rm -it --image=cirros -- /bin/sh
# Check the service addresses
kubectl get svc -n efk
# Three addresses show up here, which is as expected
curl Elasticsearch:Port/_cat/nodes # curl 10.102.204.238:9200/_cat/nodes
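Beyond listing the nodes, the cluster health endpoint gives a quick pass/fail signal (a sketch against the same illustrative service IP):
# Shows green/yellow/red cluster status plus node and shard counts
curl 10.102.204.238:9200/_cluster/health?pretty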
# Deploy Fluentd
tar -zvxf fluentd-elasticsearch-2.0.7.tgz
# Edit values.yaml: point the Elasticsearch host at the ES service IP
host: '10.102.204.238'
helm install --name flu1 --namespace=efk -f values.yaml .
# Deploy Kibana
tar -zvxf kibana-0.14.8.tgz
# Edit values.yaml
elasticsearch.url: http://10.102.204.238:9200
helm install --name kib1 --namespace=efk -f values.yaml .
kubectl get pod -n efk
kubectl get svc -n efk
# Change the svc type from ClusterIP to NodePort so the web UI can be accessed directly
kubectl edit svc kib1-kibana -n efk
type: NodePort
# Access it from a browser; logs can be searched and sliced by time
http://192.168.1.10:31430/
# Enter the certificate directory
cd /etc/kubernetes/pki
# Check the validity period of the apiserver certificate (and the CA)
openssl x509 -in apiserver.crt -text -noout # one year
openssl x509 -in ca.crt -text -noout # ten years
mkdir ~/data && cd ~/data
# Download Go from https://studygolang.com/dl (go1.12.9.linux-amd64.tar.gz)
tar -zvxf go1.12.9.linux-amd64.tar.gz -C /usr/local
vim /etc/profile
export PATH=$PATH:/usr/local/go/bin
source /etc/profile
go version
# Clone the Kubernetes source code locally
git clone https://github.com/kubernetes/kubernetes
# Check out the matching release
git checkout -b remotes/origin/release-1.15.1 v1.15.1
# Modify the kubeadm source to change the certificate validity policy
vim cmd/kubeadm/app/util/pkiutil/pki_helpers.go # this path applies from kubeadm 1.14 onwards
# Add a certificate-duration constant (10 years) and use it when setting NotAfter
const duration3650d = time.Hour * 24 * 3650
NotAfter: time.Now().Add(duration3650d).UTC(),
# Save, exit, then build kubeadm
make WHAT=cmd/kubeadm GOFLAGS=-v
cp output/bin/kubeadm /root/
# Back up the original binary first
cp /usr/bin/kubeadm /usr/bin/kubeadm.old
cp kubeadm /usr/bin/
chmod a+x /usr/bin/kubeadm
cd /etc/kubernetes/
cp -r pki/ pki.old
# Regenerate all certificate files
kubeadm alpha certs renew all --config=/usr/local/install-k8s/core/kubeadm-config.yaml
# Check the certificate validity again
cd /etc/kubernetes/pki
openssl x509 -in apiserver.crt -text -noout # now ten years
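To confirm every certificate at once, a small loop over the pki directory works (a sketch using standard openssl options):
# Print the expiry date of each certificate under /etc/kubernetes/pki
for crt in /etc/kubernetes/pki/*.crt; do
  echo "$crt: $(openssl x509 -in "$crt" -noout -enddate)"
done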