Reposted from https://blog.csdn.net/real_myth/article/details/78719244
Disable SELinux:

# setenforce 0
# sed -i '/^SELINUX=/cSELINUX=disabled' /etc/sysconfig/selinux

Install etcd and Kubernetes on the master:

# yum -y install etcd kubernetes
Configure etcd. Make sure every item listed here is set correctly and is not commented out; the same goes for all of the configuration files below.
# vim /etc/etcd/etcd.conf
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379"
Configure the Kubernetes API server:
# vim /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet_port=10250"
KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""

Start the master services and enable them at boot:

# for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
done
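With the master services running it is worth sanity-checking etcd before going further. Two quick checks against the default client URL configured above (output varies per machine, so it is not shown here):

# etcdctl cluster-health
# curl http://127.0.0.1:2379/version

Both should answer without errors, and cluster-health should report the single member as healthy.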
3. Set up the etcd network
# etcdctl -C 10.0.0.81:2379 set /atomic.io/network/config '{"Network":"10.1.0.0/16"}'
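To confirm the key was written, it can simply be read back from the same endpoint:

# etcdctl -C 10.0.0.81:2379 get /atomic.io/network/config

which should print the JSON value that was just set.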
At this point no node has registered with the master yet, so kubectl only prints the column headers:

# kubectl get nodes
NAME      LABELS    STATUS
On each minion, install flannel and Kubernetes:

# yum -y install flannel kubernetes
Configure the master IP that the Kubernetes components connect to:
# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.81:8080"
KUBE_ETCD_SERVERS="--etcd_servers=http://10.0.0.81:2379"
Configure the kubelet (replace $LOCALIP below with each minion's own IP address, e.g. 10.0.0.82):
# vim /etc/kubernetes/kubelet
KUBELET_PORT="--port=10250"
# change the hostname to this host's IP address
KUBELET_HOSTNAME="--hostname_override=$LOCALIP"
KUBELET_API_SERVER="--api_servers=http://10.0.0.81:8080"
KUBELET_ARGS=""
# ifconfig docker0
docker0   Link encap:Ethernet  HWaddr 02:42:B2:75:2E:67
          inet addr:172.17.0.1  Bcast:0.0.0.0  Mask:255.255.0.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
Warning: on a machine that has already run Docker you will see this docker0 interface. Delete the docker0 configuration before starting the services by running: sudo ip link delete docker0
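A minimal sketch of that cleanup, assuming Docker is stopped first on the minion:

# systemctl stop docker
# ip link delete docker0
# ip addr show docker0     # should now report that the device does not exist

Docker is restarted by the service loop further below and will recreate docker0 inside the subnet flannel assigns to this node.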
3. Configure the flannel network
# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.81:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

Start the minion services and enable them at boot:

# for SERVICES in flanneld kube-proxy kubelet docker; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
done
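To check flannel on a minion, inspect the subnet lease it obtained; this assumes the stock flanneld unit, which writes its lease to /run/flannel/subnet.env:

# cat /run/flannel/subnet.env
# ip addr show flannel0

FLANNEL_SUBNET should fall inside the 10.1.0.0/16 range stored in etcd, and after docker restarts, docker0 should come up inside that same subnet.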
Back on the master, check that the minions have registered:

# kubectl get nodes
NAME        STATUS    AGE
10.0.0.82   Ready     1m
10.0.0.83   Ready     1m
The two configured minions now appear in the master's node list. To add more nodes, simply configure more machines the same way as the minions.
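The flannel leases of all nodes can also be listed straight from etcd on the master, using the prefix configured earlier:

# etcdctl -C 10.0.0.81:2379 ls /atomic.io/network/subnets

One key per node should appear, each naming the subnet leased to that minion.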
# Complete contents of a pod definition file in YAML format:
apiVersion: v1        # required; version, e.g. v1
kind: Pod             # required; Pod
metadata:             # required; metadata
  name: string        # required; pod name
  namespace: string   # required; namespace the pod belongs to
  labels:             # custom labels
    - name: string    # custom label name
  annotations:        # custom annotation list
    - name: string
spec:                 # required; detailed definition of the containers in the pod
  containers:         # required; list of containers in the pod
  - name: string      # required; container name
    image: string     # required; container image name
    imagePullPolicy: [Always | Never | IfNotPresent]  # image pull policy: Always always pulls the image, IfNotPresent prefers a local image and pulls only if none exists, Never only ever uses a local image
    command: [string] # container start command list; if not specified, the command baked into the image is used
    args: [string]    # arguments for the start command
    workingDir: string  # container working directory
    volumeMounts:     # storage volumes mounted into the container
    - name: string    # name of a shared volume defined by the pod; must be a volume name defined in the volumes[] section
      mountPath: string # absolute path where the volume is mounted inside the container; should be under 512 characters
      readOnly: boolean # whether the mount is read-only
    ports:            # list of ports to expose
    - name: string    # port name
      containerPort: int # port the container listens on
      hostPort: int   # port to listen on on the container's host; defaults to the same as containerPort
      protocol: string  # port protocol, TCP or UDP; defaults to TCP
    env:              # environment variables to set before the container runs
    - name: string    # environment variable name
      value: string   # environment variable value
    resources:        # resource limits and requests
      limits:         # resource limits
        cpu: string   # CPU limit in cores; used for the docker run --cpu-shares parameter
        memory: string  # memory limit in units such as Mib/Gib; used for the docker run --memory parameter
      requests:       # resource requests
        cpu: string   # CPU request; the initial amount available when the container starts
        memory: string  # memory request; the initial amount available when the container starts
    livenessProbe:    # health check for each container in the pod; the container is restarted automatically once the probe fails repeatedly. The methods are exec, httpGet and tcpSocket; configure only one of them per container
      exec:           # probe the container with an exec command
        command: [string] # command or script the exec probe runs
      httpGet:        # probe the container with an HTTP GET request; path and port must be specified
        path: string
        port: number
        host: string
        scheme: string
        httpHeaders:
        - name: string
          value: string
      tcpSocket:      # probe the container with a TCP socket check
        port: number
      initialDelaySeconds: 0  # seconds to wait after the container starts before the first probe
      timeoutSeconds: 0       # seconds to wait for a probe response before timing out; default 1
      periodSeconds: 0        # interval between probes in seconds; default 10
      successThreshold: 0
      failureThreshold: 0
    securityContext:
      privileged: false
  restartPolicy: [Always | Never | OnFailure]  # restart policy: Always restarts the container no matter how it stopped, OnFailure restarts it only when it exits with a non-zero code, Never never restarts it
  nodeSelector: object  # schedule the pod onto nodes that carry these labels, given as key: value pairs
  imagePullSecrets:     # names of the secrets used when pulling images, given as name: secretName
  - name: string
  hostNetwork: false    # whether to use host networking; defaults to false; true means the pod uses the host's network
  volumes:              # list of shared storage volumes defined on the pod
  - name: string        # shared volume name (there are many volume types)
    emptyDir: {}        # emptyDir volume: a temporary directory with the same lifetime as the pod; its value is empty
    hostPath:           # hostPath volume: mounts a directory of the host the pod runs on
      path: string      # directory on the host that will be mounted into the container
    secret:             # secret volume: mounts a secret object defined in the cluster into the container
      secretName: string
      items:
      - key: string
        path: string
    configMap:          # configMap volume: mounts a predefined ConfigMap object into the container
      name: string
      items:
      - key: string
        path: string
apiVersion: v1
kind: Pod
metadata:
  name: redis-php
  labels:
    name: redis-php
spec:
  containers:
  - name: frontend
    image: kubeguide/guestbook-php-frontend:localredis
    ports:
    - containerPort: 80
  - name: redis-php
    image: kubeguide/redis-master
    ports:
    - containerPort: 6379
# kubectl get pods
NAME        READY     STATUS    RESTARTS   AGE
redis-php   2/2       Running   0          10m
READY shows 2/2, meaning both containers in the pod are running successfully.
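Because this pod holds two containers, per-container operations need the -c flag to pick one; for example, using the container names from the manifest above:

# kubectl logs redis-php -c frontend
# kubectl logs redis-php -c redis-php

Without -c, kubectl logs refuses to run and lists the container names to choose from.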
[root@kubernetes-master ~]# kubectl describe redis-php
the server doesn't have a resource type "redis-php"
[root@kubernetes-master ~]# kubectl describe pod redis-php
Name:           redis-php
Namespace:      default
Node:           kubernetes-minion/10.0.0.23
Start Time:     Wed, 12 Apr 2017 09:14:58 +0800
Labels:         name=redis-php
Status:         Running
IP:             10.1.24.2
Controllers:    <none>
Containers:
  nginx:
    Container ID:   docker://d05b743c200dff7cf3b60b7373a45666be2ebb48b7b8b31ce0ece9be4546ce77
    Image:          nginx
    Image ID:       docker-pullable://docker.io/nginx@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582
    Port:           80/TCP
    State:          Running
      Started:      Wed, 12 Apr 2017 09:19:31 +0800
# kubectl delete pod static-web-node1
pod "static-web-node1" deleted
# kubectl get pods
NAME               READY     STATUS    RESTARTS   AGE
static-web-node1   0/1       Pending   0          1s
The pod reappears (as Pending) because it is a static pod managed directly by the kubelet; to remove it for good, delete its manifest file from the kubelet's manifest directory:
#rm -f /etc/kubelet.d/static-web.yaml
#docker ps
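The static-web.yaml referred to above is not reproduced in this article; a minimal sketch of what such a static pod manifest might look like (image and port assumed), dropped into the kubelet's manifest directory /etc/kubelet.d/:

apiVersion: v1
kind: Pod
metadata:
  name: static-web
  labels:
    name: static-web
spec:
  containers:
  - name: static-web
    image: nginx
    ports:
    - containerPort: 80

The kubelet appends its own node name to the pod name, which is why it shows up as static-web-node1 in the kubectl output above.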
apiVersion: v1
kind: Pod
metadata:
  name: volume-pod
  labels:
    name: volume-pod
spec:
  containers:
  - name: tomcat
    image: tomcat
    ports:
    - containerPort: 8080
    volumeMounts:
    - name: app-logs
      mountPath: /usr/local/tomcat/logs
  - name: busybox
    image: busybox
    command: ["sh", "-c", "tail -f /logs/catalina*.log"]
    volumeMounts:
    - name: app-logs
      mountPath: /logs
  volumes:
  - name: app-logs
    emptyDir: {}
#kubectl logs volume-pod -c busybox
#kubectl exec -ti volume-pod -c tomcat -- ls /usr/local/tomcat/logs
# vim cm-appvars.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm-appvars
data:
  apploglevel: info
  appdatadir: /var/data
#kubectl create -f cm-appvars.yaml
configmap "cm-appvars" created
# kubectl get configmap
NAME         DATA      AGE
cm-appvars   2         3s
[root@kubernetes-master ~]# kubectl describe configmap cm-appvars
Name:           cm-appvars
Namespace:      default
Labels:         <none>
Annotations:    <none>

Data
====
appdatadir:     9 bytes
apploglevel:    4 bytes
[root@kubernetes-master ~]# kubectl get configmap cm-appvars -o yaml
apiVersion: v1
data:
  appdatadir: /var/data
  apploglevel: info
kind: ConfigMap
metadata:
  creationTimestamp: 2017-04-14T06:03:36Z
  name: cm-appvars
  namespace: default
  resourceVersion: "571221"
  selfLink: /api/v1/namespaces/default/configmaps/cm-appvars
  uid: 190323cb-20d8-11e7-94ec-000c29ac8d83
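Besides being mounted as files (the next example), a ConfigMap such as cm-appvars is commonly consumed as container environment variables. A minimal sketch, with the pod name, image and variable names made up for illustration:

apiVersion: v1
kind: Pod
metadata:
  name: cm-test-pod
spec:
  containers:
  - name: cm-test
    image: busybox
    command: ["/bin/sh", "-c", "env | grep APP"]   # print the injected variables and exit
    env:
    - name: APPLOGLEVEL          # variable name inside the container
      valueFrom:
        configMapKeyRef:
          name: cm-appvars       # the ConfigMap created above
          key: apploglevel       # the key whose value is injected
    - name: APPDATADIR
      valueFrom:
        configMapKeyRef:
          name: cm-appvars
          key: appdatadir
  restartPolicy: Never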
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm-appvars
data:
  key-serverxml: |
    .....
    </Service>
    </Server>
  key-loggingproperties: "handlers=lcatalina.org.apache.juli.FileHandler,
    ...."
#vim cm-test-app.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cm-test-app
spec:
  containers:
  - name: cm-test-app
    image: tomcat-app:v1
    ports:
    - containerPort: 8080
    volumeMounts:
    - name: serverxml              # reference the volume name
      mountPath: /configfiles      # mount point inside the container
  volumes:
  - name: serverxml                # define the volume referenced above
    configMap:
      name: cm-test-appconfigfile  # use the previously defined ConfigMap
      items:
      - key: key-serverxml         # mount the key key-serverxml
        path: server.xml           # as a file named server.xml
      - key: key-loggingproperties # mount the key key-loggingproperties
        path: logging.properties   # as a file named logging.properties
#kubectl create -f cm-test-app.yaml
pod "cm-test-app" created
#kubectl exec -ti cm-test-app -- bash
root@cm-test-app:/# cat /configfiles/server.xml
root@cm-test-app:/# cat /configfiles/logging.properties
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec
  labels:
    name: liveness
spec:
  containers:
  - name: tomcat
    image: gcr.io/google_containers/tomcat
    args:
    - /bin/sh
    - -c
    - echo ok > /tmp/health; sleep 10; rm -fr /tmp/health; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/health
      initialDelaySeconds: 15
      timeoutSeconds: 1
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-healthcheck
spec:
  containers:
  - name: nginx
    image: nginx
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 30
      timeoutSeconds: 1
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-healthcheck
spec:
  containers:
  - name: nginx
    image: nginx
    livenessProbe:
      httpGet:
        path: /_status/healthz
        port: 80
      initialDelaySeconds: 30
      timeoutSeconds: 1
# kubectl label nodes k8s-node-1 zone=north
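To confirm the label was applied (both forms are supported by kubectl of this era):

# kubectl get nodes --show-labels
# kubectl get nodes -l zone=north

The second command should list only k8s-node-1.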
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
|
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  labels:
    name: redis-master
spec:
  replicas: 1
  selector:
    name: redis-master
  template:
    metadata:
      labels:
        name: redis-master
    spec:
      containers:
      - name: redis-master
        image: kubeguide/redis-master
        ports:
        - containerPort: 6379
      nodeSelector:
        zone: north
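After creating the RC (file name assumed here), the wide pod listing shows which node each pod was scheduled onto, so the nodeSelector can be verified:

# kubectl create -f redis-master-controller.yaml
# kubectl get pods -o wide

The redis-master pod should land on k8s-node-1, the only node carrying the zone=north label.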
#kubectl scale rc redis-slave --replicas=3
replicationcontroller "redis-slave" scaled
#kubectl get pods
NAME                READY     STATUS    RESTARTS   AGE
redis-slave-1sf23   1/1       Running   0          1h
redis-slave-54wfk   1/1       Running   0          1h
redis-slave-3da5y   1/1       Running   0          1h
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
|
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master-v2
  labels:
    name: redis-master
    version: v2
spec:
  replicas: 1
  selector:
    name: redis-master
    version: v2
  template:
    metadata:
      labels:
        name: redis-master
        version: v2
    spec:
      containers:
      - name: master
        image: kubeguide/redis-master:2.0
        ports:
        - containerPort: 6379
Points to note: the new RC must have a different name from the old one, and its selector must include at least one label whose value differs (here version: v2), so that the old and new RCs do not select each other's pods.

Run the rolling update against the new definition:

#kubectl rolling-update redis-master -f redis-master-controller-v2.yaml
Alternatively, if only the container image needs to change, rolling-update can be run directly with --image and kubectl generates the new RC itself:

#kubectl rolling-update redis-master --image=redis-master:2.0
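If a rolling update goes wrong part-way through, the same command can abort it and return to the old RC; with the kubectl of this era the rollback form is:

# kubectl rolling-update redis-master redis-master-v2 --rollback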