kubernetes v1.8 master单节点 无证书 部署

master节点部署

  1. 安装etcd
    yum -y install etcd
    vim /etc/etcd/etcd.conf
    修改以下两项:监听地址(LISTEN)改为0.0.0.0;advertise地址必须填本机实际IP,不能使用0.0.0.0,否则客户端无法连接
  ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
  ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.105:2379"   #修改为本机实际IP

systemctl enable etcd
systemctl start etcd

  2. 安装kube-apiserver
    mkdir /etc/kubernetes
    vim /etc/kubernetes/kube-apiserver
# 启用日志标准错误 
 KUBE_LOGTOSTDERR="--logtostderr=true"

 # 日志级别
KUBE_LOG_LEVEL="--v=3"

 # Etcd服务地址
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.1.105:2379"   #修改实际的etcd地址

# 指定Etcd版本,默认etcd3
KUBE_ETCD_VERSION="--storage-backend=etcd3"

# API服务监听地址
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# API服务监听端口
KUBE_API_PORT="--insecure-port=8080"

# Kubelet端口
NODE_PORT="--kubelet-port=10250"

# 对集群中成员提供API服务地址
KUBE_ADVERTISE_ADDR="--advertise-address=192.168.1.105"    #修改为master地址,也就是本机地址

# 允许容器请求特权模式,默认false
KUBE_ALLOW_PRIV="--allow-privileged=false"

# 集群分配的IP范围
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.10.10.0/24"

# 控制资源进入集群, 默认AlwaysAdmit
# Comma-delimited list of:
#   LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
#   NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit,
#   ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

vim /lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver
ExecStart=/usr/bin/kube-apiserver \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_ETCD_SERVERS \
        $KUBE_ETCD_VERSION \
        $KUBE_API_ADDRESS \
        $KUBE_API_PORT \
        $NODE_PORT \
        $KUBE_ADVERTISE_ADDR \
        $KUBE_ALLOW_PRIV \
        $KUBE_SERVICE_ADDRESSES \
        $KUBE_ADMISSION_CONTROL
Restart=on-failure
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

  3. 安装kube-controller-manager

vim /etc/kubernetes/kube-controller-manager

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=192.168.1.105:8080"  #修改为实际的master地址
# 在执行主循环之前,先选举一个leader。高可用性运行组件时启用此功能,默认true 
KUBE_LEADER_ELECT="--leader-elect"

vim /lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager
ExecStart=/usr/bin/kube-controller-manager \
         $KUBE_LOGTOSTDERR \
         ${KUBE_LOG_LEVEL} \
         ${KUBE_MASTER} \
         ${KUBE_LEADER_ELECT}
Restart=on-failure
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

  4. 安装kube-scheduler

vim /etc/kubernetes/kube-scheduler

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=192.168.1.105:8080"   #修改为实际地址
KUBE_LEADER_ELECT="--leader-elect"
# 其他参数
KUBE_SCHEDULER_ARGS=""

vim /lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler
ExecStart=/usr/bin/kube-scheduler \
          $KUBE_LOGTOSTDERR \
          $KUBE_LOG_LEVEL \
          $KUBE_MASTER \
          $KUBE_LEADER_ELECT \
          $KUBE_SCHEDULER_ARGS 
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

另一台服务器安装node节点

  1. 安装kubelet
    mkdir /etc/kubernetes

vim /etc/kubernetes/kubelet.kubeconfig

apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: http://192.168.1.105:8080/   ##修改为实际master节点
    name: local
contexts:
  - context:
      cluster: local
    name: local
current-context: local

vim /etc/kubernetes/kubelet


# 启用日志标准错误 
KUBE_LOGTOSTDERR="--logtostderr=true"

# 日志级别 
KUBE_LOG_LEVEL="--v=4"

# Kubelet服务IP地址 
NODE_ADDRESS="--address=192.168.1.106"    #修改为node节点地址

# Kubelet服务端口。弃用
# NODE_PORT="--port=10250"

# 自定义节点名称
NODE_HOSTNAME="--hostname-override=192.168.1.106"   #修改为node节点地址

# kubeconfig路径,指定连接API服务器
KUBELET_KUBECONFIG="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig"

# 允许容器请求特权模式,默认false
KUBE_ALLOW_PRIV="--allow-privileged=false"

# DNS信息
KUBELET_DNS_IP="--cluster-dns=10.10.10.2"
KUBELET_DNS_DOMAIN="--cluster-domain=cluster.local"

# 其他参数
KUBELET_ARGS=""

vim /lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
         $KUBE_LOGTOSTDERR \
         $KUBE_LOG_LEVEL \
         $NODE_ADDRESS \
         $NODE_PORT \
         $NODE_HOSTNAME \
         $KUBELET_KUBECONFIG \
         $KUBE_ALLOW_PRIV \
         $KUBELET_DNS_IP \
         $KUBELET_DNS_DOMAIN \
         $KUBELET_ARGS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
systemctl status kubelet

  2. 安装kube-proxy
    vim /etc/kubernetes/kube-proxy
KUBE_LOGTOSTDERR="--logtostderr=true"

# 日志级别 
KUBE_LOG_LEVEL="--v=4"

# 自定义节点名称
NODE_HOSTNAME="--hostname-override=192.168.1.106"   #修改为node节点地址

# API服务地址 
KUBE_MASTER="--master=http://192.168.1.105:8080"    #修改为master地址

vim /lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy
ExecStart=/usr/bin/kube-proxy \
          $KUBE_LOGTOSTDERR \
          $KUBE_LOG_LEVEL \
          $NODE_HOSTNAME \
          $KUBE_MASTER 
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl start kube-proxy
systemctl enable kube-proxy

##验证
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.1.106 Ready 6d v1.8.15
192.168.1.107 Ready 6d v1.8.15

如上,在master上能获取到node节点信息,即表示集群部署正常

注意事项

  1. kubelet在启用swap分区的情况下无法启动,部署前请先关闭swap分区(执行 swapoff -a,并注释 /etc/fstab 中的swap条目以防重启后恢复)
  2. 如果各服务启动正常却无法获取到node节点,请检查防火墙(iptables/firewalld)是否已关闭,或已放行8080、10250等所用端口

你可能感兴趣的:(kubernetes)