1. Document Overview

1.1 Purpose

Kubernetes (k8s) installation and k8s web monitoring.

1.2 Scope

This document sets out the concrete steps for this deployment and the conventions for using the cluster afterwards.

1.3 Environment Preparation

OS: CentOS 7
Software:
k8s 1.5.2
https://dl.k8s.io/v1.5.2/kubernetes-server-linux-amd64.tar.gz
(unpacks to 11 binaries: hyperkube, kubectl, kubelet, kube-scheduler, kubeadm, kube-controller-manager, kube-discovery, kube-proxy, kube-apiserver, kube-dns, kubefed)
https://dl.k8s.io/v1.5.2/kubernetes-client-linux-amd64.tar.gz
(unpacks to two binaries: kubectl and kubefed)
etcd 3.1.0
https://github.com/coreos/etcd/releases/download/v3.1.0/etcd-v3.1.0-linux-amd64.tar.gz
docker 1.13.1
https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz
flannel 0.7.0
https://github.com/coreos/flannel/releases/download/v0.7.0/flannel-v0.7.0-linux-amd64.tar.gz
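If the packages are fetched by hand, a minimal download sketch (assuming the master has outbound internet access; URLs as listed above):
cd /usr/local/src
wget https://dl.k8s.io/v1.5.2/kubernetes-server-linux-amd64.tar.gz
wget https://github.com/coreos/etcd/releases/download/v3.1.0/etcd-v3.1.0-linux-amd64.tar.gz
wget https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz
wget https://github.com/coreos/flannel/releases/download/v0.7.0/flannel-v0.7.0-linux-amd64.tar.gz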

Servers:

Node        IP               Serial number
K8s-master  192.168.203.196
K8s-node2   192.168.203.193
K8s-node1   192.168.203.192  2102350HND10G8000229

1.4 Server Firewall Adjustment

Disable SELinux and disable firewalld.

Steps:

1. CentOS 7 uses firewalld as its default firewall; to use iptables it has to be reconfigured (a one-shot command sketch follows this list).
2. Stop the firewall directly:
systemctl stop firewalld.service      # stop firewalld
systemctl disable firewalld.service   # keep firewalld from starting at boot
3. Set up the iptables service:
yum -y install iptables-services
To change the firewall configuration, e.g. to open port 3306:
vi /etc/sysconfig/iptables
Add the rule:
-A INPUT -m state --state NEW -m tcp -p tcp --dport 3306 -j ACCEPT
Save and exit, then:
systemctl restart iptables.service    # restart iptables so the change takes effect
systemctl enable iptables.service     # start iptables at boot
Finally, reboot the system for the settings to take effect.
4. Disable SELinux:
vim /etc/selinux/config
and set SELINUX=disabled.
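The whole adjustment can also be scripted; a sketch (setenforce only affects the running session, the sed edit persists across reboots):
systemctl stop firewalld.service && systemctl disable firewalld.service
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config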

1.5 Installing k8s-master

1. Preparation
1) Minimal system install, then yum update to bring it to the latest release (CentOS 7.3.1611).
2) Set the hostname and hosts entries:
[root@k8s-master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.203.196 k8s-master
192.168.203.192 k8s-node1
192.168.203.193 k8s-node2
3) Sync the clock:
[root@k8s-master ~]# ntpdate ntp1.aliyun.com &&hwclock -w
2. Deploy the etcd service (currently a single node):
[root@k8s-master ~]# tar zxvf etcd-v3.1.0-linux-amd64.tar.gz -C /usr/local/
[root@k8s-master ~]# mv /usr/local/etcd-v3.1.0-linux-amd64/ /usr/local/etcd
[root@k8s-master ~]# ln -s /usr/local/etcd/etcd /usr/local/bin/etcd
[root@k8s-master ~]# ln -s /usr/local/etcd/etcdctl /usr/local/bin/etcdctl
Create the systemd unit file /usr/lib/systemd/system/etcd.service:
[Unit]
Description=Etcd Server
After=network.target
[Service]
WorkingDirectory=/data/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/bin/etcd
Type=notify
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
WorkingDirectory is the directory where etcd keeps its data; it must be created before the etcd service is started.
Default single-node etcd configuration:
[root@k8s-master ~]# cat /etc/etcd/etcd.conf
ETCD_NAME=k8s1
ETCD_DATA_DIR="/data/etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
Start the etcd service:
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable etcd.service
[root@k8s-master ~]# systemctl start etcd.service
Check the etcd service:
[root@k8s-master ~]# etcdctl cluster-health
member 869f0c691c5458a3 is healthy: got healthy result from http://0.0.0.0:2379
cluster is healthy
[root@k8s-master ~]# etcdctl member list
869f0c691c5458a3: name=k8s1 peerURLs=http://172.17.3.20:2380 clientURLs=http://0.0.0.0:2379 isLeader=true
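As an extra sanity check, write and read back a throwaway key (the key name here is arbitrary):
etcdctl set /test/hello world
etcdctl get /test/hello
etcdctl rm /test/hello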
3. Deploy the kube-apiserver service
Install kube-apiserver:
[root@k8s-master ~]# tar zxvf kubernetes-server-linux-amd64.tar.gz -C /usr/local/
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-apiserver /usr/local/bin/kube-apiserver
Symlink the remaining binaries while you are at it:
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/hyperkube /usr/local/bin/hyperkube
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kubeadm /usr/local/bin/kubeadm
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-controller-manager /usr/local/bin/kube-controller-manager
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kubectl /usr/local/bin/kubectl
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-discovery /usr/local/bin/kube-discovery
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-dns /usr/local/bin/kube-dns
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kubefed /usr/local/bin/kubefed
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kubelet /usr/local/bin/kubelet
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-proxy /usr/local/bin/kube-proxy
[root@k8s-master ~]# ln -s /usr/local/kubernetes/server/bin/kube-scheduler /usr/local/bin/kube-scheduler
Configure the shared Kubernetes settings:
[root@k8s-master ~]# cat /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=false"
KUBE_LOG_DIR="--log-dir=/data/logs/kubernetes"
KUBE_LOG_LEVEL="--v=2"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.203.196:8080"
Create the systemd unit file /usr/lib/systemd/system/kube-apiserver.service:
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/bin/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Configure the kube-apiserver startup parameters:
[root@k8s-master ~]# cat /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.203.196:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_API_ARGS=" "
Start the kube-apiserver service:
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@k8s-master ~]# systemctl start kube-apiserver.service
Verify the service:
http://192.168.203.196:8080/
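The insecure port can also be probed from the shell; for example:
curl http://192.168.203.196:8080/healthz
curl http://192.168.203.196:8080/version
healthz should return ok, and version should return a JSON block naming v1.5.2.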
4. Deploy the kube-controller-manager service
Create the systemd unit file /usr/lib/systemd/system/kube-controller-manager.service:
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_LOG_DIR \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Configure the kube-controller-manager startup parameters:
[root@k8s-master ~]# cat /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS=""
Start the kube-controller-manager service:
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable kube-controller-manager
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@k8s-master ~]# systemctl start kube-controller-manager
5. Deploy the kube-scheduler service
Create the systemd unit file /usr/lib/systemd/system/kube-scheduler.service:
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/bin/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_LOG_DIR \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Configure the kube-scheduler startup parameters:
[root@k8s-master ~]# cat /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS=""
Start the kube-scheduler service:
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable kube-scheduler
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@k8s-master ~]# systemctl start kube-scheduler
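With all three control-plane services running, an overall check from the master (pointing kubectl at the insecure port):
kubectl -s http://192.168.203.196:8080 get componentstatuses
The scheduler, controller-manager and etcd should all report Healthy.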

1.6 Node Installation

1. yum install kubernetes-node flannel docker -y
2. Disable the firewall and sync the clock (as on the master).
3. Edit the configuration files:

[root@Resources-s1 k8s]# egrep -v '^#' /etc/kubernetes/config | grep -v '^$'
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.203.196:8080"

[root@Resources-s1 k8s]# egrep -v '^#' /etc/kubernetes/kubelet | grep -v '^$'
KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=k8s-node1"
KUBELET_API_SERVER="--api-servers=http://192.168.203.196:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
[root@Resources-s1 k8s]# cat node_start.sh
systemctl enable kubelet kube-proxy
systemctl start kubelet kube-proxy
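A quick check that both node services actually came up (a sketch):
systemctl is-active kubelet kube-proxy
Both lines should print active.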
1.7 Configuring the Network

1) Configure etcd:
[root@k8s-master ~]# etcdctl set /k8s/network/config '{ "Network": "10.1.0.0/16" }'
'{ "Network": "10.1.0.0/16" }'
[root@k8s-master ~]# etcdctl get /k8s/network/config

{ "Network": "10.1.0.0/16" }
1.8 Configuring flanneld on the Nodes

[root@Resources-s1 k8s]# egrep -v '^#' /etc/sysconfig/flanneld | grep -v '^$'
FLANNEL_ETCD="http://192.168.203.196:2379"
FLANNEL_ETCD_KEY="/k8s/network"
[root@Resources-s1 k8s]# cat flanneld_start.sh
systemctl enable flanneld
systemctl restart flanneld
[root@Resources-s1 k8s]# cat docker_start.sh
systemctl enable docker
systemctl restart docker
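To confirm flannel obtained a subnet lease from etcd, check on the node (default paths for flannel 0.7):
cat /run/flannel/subnet.env
ip addr show flannel0
subnet.env should contain a FLANNEL_SUBNET inside 10.1.0.0/16, and docker0 should fall inside the same subnet after docker restarts.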

1.9 Testing

Check that the cluster is up:
[root@Control k8s]# kubectl get nodes
NAME STATUS AGE
192.168.203.192 Ready 28m
192.168.203.193 Ready 25m

Setting up a private Docker registry. We use 192.168.203.196 as the registry host.
yum install docker -y
1. Start the registry service:
docker run -d -p 5000:5000 --privileged=true -v /data/history:/data/registry registry
[root@Control docker_dw_images]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker.io/registry latest c9bd19d022f6 6 weeks ago 33.27 MB
2. Check the registry service:
# curl -XGET http://192.168.203.196:5000/v2/_catalog
# curl -XGET http://192.168.203.196:5000/v2/image_name/tags/list
3. Add your own images to the registry.
1.1 Build a base image and load it into Docker:
cd centos6-image && tar -c . | docker import - centos6-base
1.2 Build a base image with sshd
mkdir centos6-ssh
cd centos6-ssh
vim Dockerfile
and enter:
FROM centos6-base
MAINTAINER wuqichao
RUN ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key
RUN ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN sed -ri 's/session required pam_loginuid.so/#session required pam_loginuid.so/g' /etc/pam.d/sshd
RUN mkdir -p /root/.ssh && chown root.root /root && chmod 700 /root/.ssh
EXPOSE 22
RUN echo 'root:xxx.com.cn' | chpasswd
ENV LANG en_US.UTF-8
ENV LC_ALL en_US.UTF-8
CMD /usr/sbin/sshd -D
Save and exit, then run:
docker build -t centos6-ssh .
If it finishes without errors, the local image is built.
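To confirm the image exists locally:
docker images | grep centos6-ssh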
1.3 Test-run the ssh base image:
docker run -d -p 127.0.0.1:33333:22 centos6-ssh
1.4 Log in to the running instance:
ssh [email protected] -p 33333
2. Push the image to the private registry:
### docker pull docker.io/nginx
(The pull above is only needed when the image was not built locally.)
docker tag centos6-ssh 192.168.203.196:5000/centos6-ssh
docker push 192.168.203.196:5000/centos6-ssh
3. Check that the push succeeded:
[root@Control k8s]# curl -XGET http://192.168.203.196:5000/v2/_catalog
{"repositories":["centos6-ssh"]}
Using the private registry from k8s.
1.1 Configure the server side:
[root@Control k8s_yaml]# cat /etc/sysconfig/docker | grep 192.168.203.196
OPTIONS='--insecure-registry 192.168.203.196:5000 --log-driver=journald'
ADD_REGISTRY='--add-registry 192.168.203.196:5000'
1.2 Configure the client side:
[root@Control k8s]# cat /etc/default/docker
DOCKER_OPTS="--insecure-registry 192.168.203.196:5000"
1.3 Remove the permission checks:
In /etc/kubernetes/apiserver, remove SecurityContextDeny,ServiceAccount, from KUBE_ADMISSION_CONTROL
and restart the kube-apiserver service:
# systemctl restart kube-apiserver.service
1.4 Add the DNS settings to the kubelet, otherwise pod creation will complain:
KUBELET_ARGS="--cluster-dns=192.168.203.196 --cluster-domain=playcrab-inc.com"
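After changing KUBELET_ARGS on a node, restart the kubelet so the new flags take effect:
systemctl restart kubelet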
Configuring with YAML
2.0 Common commands
Create:
kubectl create -f centos6-ssh/centos6-ssh.yaml
Delete:
kubectl delete -f centos6-ssh/centos6-ssh.yaml
List pods:
kubectl get pods
Show pod details:
kubectl describe pod centos6-ssh
2.1 Start the simplest pod
2.1.1 YAML configuration
[root@Control k8s_yaml]# cat centos6-ssh/centos6-ssh.yaml
apiVersion: v1
kind: Pod
metadata:
  name: centos6-ssh
spec:
  containers:
  - name: centos6-ssh
    image: centos6-ssh
2.1.2 List pods
[root@Control k8s_yaml]# kubectl get pods
NAME READY STATUS RESTARTS AGE
centos6-ssh-mucsv 1/1 Running 0 10m
2.1.3 Show pod details
kubectl describe pod centos6-ssh
[root@Control k8s_yaml]# kubectl describe pod centos6-ssh
Name: centos6-ssh
Namespace: default
Node: 192.168.203.192/192.168.203.192
Start Time: Wed, 30 Nov 2016 13:44:51 -0500
Labels:
Status: Running
IP: 10.1.75.2
Controllers:
Containers:
centos6-ssh:
Container ID: docker://7046491f05e3d549c198009f056b4e3e0508ad179712772bb296d0d08cc6ae29
Image: centos6-ssh
Image ID: docker://sha256:6525d364d418ae8dc854e6839dfaa653f2b6cd39c696a2f146bb918e69c20060
Port:
QoS Tier:
cpu: BestEffort
memory: BestEffort
State: Running
Started: Wed, 30 Nov 2016 13:44:52 -0500
Ready: True
Restart Count: 0
Environment Variables:
Conditions:
Type Status
Ready True
No volumes.
No events.
This confirms the container is running on node 192.168.203.192 with cluster-internal IP 10.1.75.2.
To log in to 10.1.75.2 you must first get onto 192.168.203.192, then run ssh [email protected].
2.2 Start a replicated pod
2.2.1 YAML configuration
We define a centos6-ssh ReplicationController with a replica count of 2, using the centos6-ssh image.
[root@Control k8s_yaml]# cat test/centos6-ssh-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: centos6-ssh
spec:
  replicas: 2
  selector:
    name: centos6-ssh
  template:
    metadata:
      labels:
        name: centos6-ssh
    spec:
      containers:
      - name: centos6-ssh
        image: centos6-ssh
        ports:
        - containerPort: 22
2.2.2 List pods
[root@Control k8s_yaml]# kubectl get pods
NAME READY STATUS RESTARTS AGE
centos6-ssh-mucsv 1/1 Running 0 10m
centos6-ssh-yoghv 1/1 Running 0 10m
2.2.3 Show pod details
[root@Control k8s_yaml]# kubectl describe pod centos6-ssh
Name: centos6-ssh-mucsv
Namespace: default
Node: 192.168.203.192/192.168.203.192
Start Time: Thu, 01 Dec 2016 11:04:24 -0500
Labels: name=centos6-ssh
Status: Running
IP: 10.1.75.2
Controllers: ReplicationController/centos6-ssh
Containers:
centos6-ssh:
Container ID: docker://ba9327de6f067b46ce348f409e9efa2b44a9064c4f1ea508cf7d92ff9c450541
Image: centos6-ssh
Image ID: docker://sha256:6525d364d418ae8dc854e6839dfaa653f2b6cd39c696a2f146bb918e69c20060
Port: 22/TCP
QoS Tier:
memory: BestEffort
cpu: BestEffort
State: Running
Started: Thu, 01 Dec 2016 11:04:25 -0500
Ready: True
Restart Count: 0
Environment Variables:
Conditions:
Type Status
Ready True
No volumes.
Events:
FirstSeen LastSeen Count From SubobjectPath Type Reason Message

5h 5h 2 {kubelet 192.168.203.192} Warning MissingClusterDNS kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
5h 5h 1 {kubelet 192.168.203.192} spec.containers{centos6-ssh} Normal Pulling pulling image "centos6-ssh"
5h 5h 1 {kubelet 192.168.203.192} spec.containers{centos6-ssh} Normal Pulled Successfully pulled image "centos6-ssh"
5h 5h 1 {kubelet 192.168.203.192} spec.containers{centos6-ssh} Normal Created Created container with docker id ba9327de6f06
5h 5h 1 {kubelet 192.168.203.192} spec.containers{centos6-ssh} Normal Started Started container with docker id ba9327de6f06
3m 3m 1 {default-scheduler } Normal Scheduled Successfully assigned centos6-ssh-mucsv to 192.168.203.192
Name: centos6-ssh-yoghv
Namespace: default
Node: 192.168.203.193/192.168.203.193
Start Time: Thu, 01 Dec 2016 11:04:37 -0500
Labels: name=centos6-ssh
Status: Running
IP: 10.1.68.2
Controllers: ReplicationController/centos6-ssh
Containers:
centos6-ssh:
Container ID: docker://221e4335774a8347a74fa7341f947954e3fb0eccff5fce7be427b532a4f5d31f
Image: centos6-ssh
Image ID: docker://sha256:6525d364d418ae8dc854e6839dfaa653f2b6cd39c696a2f146bb918e69c20060
Port: 22/TCP
QoS Tier:
cpu: BestEffort
memory: BestEffort
State: Running
Started: Thu, 01 Dec 2016 11:04:38 -0500
Ready: True
Restart Count: 0
Environment Variables:
Conditions:
Type Status
Ready False
No volumes.
Events:
FirstSeen LastSeen Count From SubobjectPath Type Reason Message


5h 5h 2 {kubelet 192.168.203.193} Warning MissingClusterDNS kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
5h 5h 1 {kubelet 192.168.203.193} spec.containers{centos6-ssh} Normal Pulling pulling image "centos6-ssh"
5h 5h 1 {kubelet 192.168.203.193} spec.containers{centos6-ssh} Normal Pulled Successfully pulled image "centos6-ssh"
5h 5h 1 {kubelet 192.168.203.193} spec.containers{centos6-ssh} Normal Created Created container with docker id 221e4335774a
5h 5h 1 {kubelet 192.168.203.193} spec.containers{centos6-ssh} Normal Started Started container with docker id 221e4335774a
3m 3m 1 {default-scheduler } Normal Scheduled Successfully assigned centos6-ssh-yoghv to 192.168.203.193
This confirms two instances are running:
10.1.75.2 on 192.168.203.192
10.1.68.2 on 192.168.203.193
To operate on them over SSH you still have to log in to the node that hosts each instance.
2.3 Start a service reachable on the cluster network
2.3.1 YAML configuration
[root@Control k8s_yaml]# cat test/centos6-ssh-clusterip.yaml
apiVersion: v1
kind: Service
metadata:
  name: centos6-ssh-clusterip
spec:
  ports:
  - port: 2222
    targetPort: 22
    protocol: TCP
  selector:
    name: centos6-ssh
The name in the selector must match the label used by the RC or pod.
2.3.2 Check
[root@Control k8s_yaml]# kubectl get service
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
centos6-ssh-clusterip 10.254.155.14 2222/TCP 3s
kubernetes 10.254.0.1 443/TCP 1d
[root@Control k8s_yaml]# kubectl describe service centos6-ssh-clusterip
Name: centos6-ssh-clusterip
Namespace: default
Labels:
Selector: name=centos6-ssh
Type: ClusterIP
IP: 10.254.155.14
Port: 2222/TCP
Endpoints: 10.1.68.2:22,10.1.75.2:22
Session Affinity: None
No events.
This confirms centos6-ssh-clusterip is up: it has been assigned IP 10.254.155.14, listens on port 2222,
and proxies to Endpoints: 10.1.68.2:22,10.1.75.2:22.
2.3.3 Login test
[root@Resources-s1 ~]# telnet 10.254.155.14 2222
Trying 10.254.155.14...
Connected to 10.254.155.14.
Escape character is '^]'.
SSH-2.0-OpenSSH_5.3
^Cxx
Connection closed by foreign host.
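The banner above shows sshd answering through the service IP; a full login through the same address should also work from the cluster network, using the root password set in the Dockerfile:
ssh [email protected] -p 2222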
QA
1. Fixing the HTTPS problem
[root@Control k8s]# docker push 192.168.203.196:5000/centos6-ssh
The push refers to a repository [192.168.203.196:5000/centos6-ssh]
unable to ping registry endpoint https://192.168.203.196:5000/v0/
v2 ping attempt failed with error: Get https://192.168.203.196:5000/v2/: http: server gave HTTP response to HTTPS client
v1 ping attempt failed with error: Get https://192.168.203.196:5000/v1/_ping: http: server gave HTTP response to HTTPS client
To fix this, change the configuration on both the server and the client.
Server side:
[root@Control k8s]# cat /etc/sysconfig/docker|grep 192.168.203.196
OPTIONS='--insecure-registry 192.168.203.196:5000 --log-driver=journald'
ADD_REGISTRY='--add-registry 192.168.203.196:5000'
Client side:
[root@Control k8s]# cat /etc/default/docker
DOCKER_OPTS="--insecure-registry 192.168.203.196:5000"
2. Fixing pods that are accepted but never appear in kubectl get pods
Error from server: error when creating "nginx.yaml": Pod "nginx" is forbidden: no API token found for service account default/default, retry after the token is automatically created and added to the service account
To resolve this, proceed as follows.
Create the pod:

kubectl create -f nginx.yaml

This fails with:
Error from server: error when creating "nginx.yaml": Pod "nginx" is forbidden: no API token found for service account default/default, retry after the token is automatically created and added to the service account
The fix is to edit /etc/kubernetes/apiserver, remove SecurityContextDeny,ServiceAccount, from KUBE_ADMISSION_CONTROL, and restart the kube-apiserver service:
#vim /etc/kubernetes/apiserver
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
#systemctl restart kube-apiserver.service
Then create the pod again:

kubectl create -f nginx.yaml

pods/nginx

3. ClusterDNS problems keep pods from being created
kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
Fix it by setting:
KUBELET_ARGS="--cluster-dns=192.168.203.196 --cluster-domain=playcrab-inc.com"
1.10 Web UI Installation

docker pull gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
docker tag gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0 127.0.0.1:5000/kubernetes-dashboard
Enter the dashboard directory of the k8s package:
cd /data/kubernetes/cluster/addons/dashboard
[root@k8s-master dashboard]# vim dashboard-service.yaml
# This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
[root@k8s-master dashboard]# vim dashboard-controller.yaml
# This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubernetes-dashboard
        image: 127.0.0.1:5000/kubernetes-dashboard
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
Start the web UI:
kubectl create -f dashboard-service.yaml
kubectl create -f dashboard-controller.yaml
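To check that the dashboard came up (it runs in the kube-system namespace):
kubectl get pods --namespace=kube-system
The dashboard should then be reachable through the apiserver at http://192.168.203.196:8080/ui.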

Note: when starting flannel, docker must be stopped first; start docker again only after flannel is up.
Reason: flannel replaces the docker0 network.
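A restart sequence that respects this ordering (a sketch):
systemctl stop docker
systemctl restart flanneld
systemctl start docker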