k8s 集群搭建(三台机器)


1.1设置系统主机名以及 Host 文件的相互解析

hostnamectl set-hostname k8s-master01

1.2安装依赖包

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

1.3设置防火墙为 Iptables 并设置空规则

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables   && iptables -F && service iptables save

1.4关闭 SELINUX

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

1.5调整内核参数,对于 K8S

# 注意:原文此处 heredoc 内容在转载时丢失,以下为该教程的通用内核参数,请自行核对
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

1.6调整系统时区

# 设置系统时区为 中国/上海
timedatectl  set-timezone Asia/Shanghai 
# 将当前的 UTC 时间写入硬件时钟
timedatectl  set-local-rtc 0
# 重启依赖于系统时间的服务
systemctl restart rsyslog 
systemctl restart crond

1.7关闭系统不需要服务

systemctl stop postfix && systemctl disable postfix

1.8设置 rsyslogd systemd journald

mkdir /var/log/journal # 持久化保存日志的目录
mkdir /etc/systemd/journald.conf.d
# 注意:原文此处 heredoc 内容在转载时丢失,以下为该教程的通用 journald 配置,请自行核对
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald

1.9升级系统内核为 4.4(elrepo kernel-lt 长期支持版)

CentOS 7.x 系统自带的 3.10.x 内核存在一些 Bugs,导致运行的 Docker、Kubernetes 不稳定,需要升级内核:

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# 安装完成后检查 /boot/grub2/grub.cfg 中对应内核 menuentry 中是否包含 initrd16 配置,如果没有,再安装一次!
yum --enablerepo=elrepo-kernel install -y kernel-lt 
# 设置开机从新内核启动
grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)" 
# 重启后安装内核源文件
yum --enablerepo=elrepo-kernel install kernel-lt-devel-$(uname -r) kernel-lt-headers-$(uname -r)

1.10关闭 NUMA

cp /etc/default/grub{,.bak}
vim /etc/default/grub # 在 GRUB_CMDLINE_LINUX 一行添加 `numa=off` 参数,如下所示:

GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off" 
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg

1.11kube-proxy开启ipvs的前置条件

modprobe br_netfilter

# 注意:原文此处 heredoc 内容在转载时丢失,以下为该教程的通用 ipvs 模块加载脚本,请自行核对
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

2.1安装 Docker 软件

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 

yum update -y && yum install -y docker-ce

## 创 建 /etc/docker 目 录
mkdir /etc/docker

# 配 置 daemon.
# 注意:原文此处 heredoc 内容在转载时丢失,以下为该教程的通用 daemon.json,请自行核对
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

3.1导入k8s所需要的文件(需要资料的加我QQ 1103225671)

#确保 /usr/local/kubernates/install 目录存在

scp * root@k8s-master01:/usr/local/kubernates/install
scp * root@k8s-master02:/usr/local/kubernates/install
scp * root@k8s-master03:/usr/local/kubernates/install

docker load -i haproxy.tar

docker load -i keepalived.tar

tar -zxvf  kubeadm-basic.images.tar.gz

vi load-images.sh

#!/bin/bash
# Load every saved Docker image archive from the kubeadm images directory.
# Fixes over the original: abort if the directory is missing (cd || exit),
# iterate with a glob instead of parsing a file listing (SC2013), quote the
# filename, and drop the unnecessary temp file in /tmp.
cd /usr/local/kubernates/install/kubeadm-basic.images || exit 1
for img in *; do
  # Skip this script itself if it lives alongside the image archives.
  [ "$img" = "load-images.sh" ] && continue
  # Only regular files can be docker image archives.
  [ -f "$img" ] || continue
  docker load -i "$img"
done

sh load-images.sh

tar -zxvf  start.keep.tar.gz
mv data/ /
cd /data/lb/
vim etc/haproxy.cfg
  server rancher01 10.0.104.151:6443 #集群地址
  server rancher02 10.0.104.152:6443
  server rancher03 10.0.104.153:6443

vim start-haproxy.sh
MasterIP1=10.0.104.151
MasterIP2=10.0.104.152
MasterIP3=10.0.104.153

./start-haproxy.sh
 netstat -tunlp|grep 6444

vim start-keepalived.sh
VIRTUAL_IP=10.0.104.152   # 当前机器地址
INTERFACE=ens33           # 网卡名称

k8s 集群搭建(三台机器)_第1张图片

4.1安装 Kubeadm (主从配置)

# 注意:原文此处 heredoc 内容在转载时丢失,以下为阿里云 kubernetes yum 源配置,请自行核对
cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 版本与下文 kubeadm-config.yaml 中的 kubernetesVersion: v1.14.0 保持一致
yum install -y kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0 && systemctl enable kubelet.service

4.2配置启动文件

kubeadm config print init-defaults > kubeadm-config.yaml
vi kubeadm-config.yaml

# kubeadm init configuration: two YAML documents —
# InitConfiguration (this node) + ClusterConfiguration (whole cluster).
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  # kubeadm's default example token, valid for 24h.
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.104.152      # IP of this master node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master02
  # Standard master taint: keeps ordinary workloads off control-plane nodes.
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# Load-balanced endpoint: the haproxy configured above listens on 6444
# and forwards to the three masters' 6443.
controlPlaneEndpoint: "10.0.104.151:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  # NOTE(review): left empty here; flannel typically expects 10.244.0.0/16 —
  # confirm before `kubeadm init`.
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
scheduler: {}

4.3初始化主节点

kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

4.4Etcd 集群状态查看

kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
--endpoints=https://10.0.104.151:2379 \
--ca-file=/etc/kubernetes/pki/etcd/ca.crt \
--cert-file=/etc/kubernetes/pki/etcd/server.crt \
--key-file=/etc/kubernetes/pki/etcd/server.key cluster-health

kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml 
kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml

4.5部署网络

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

---
# PodSecurityPolicy for the flannel DaemonSet (part of kube-flannel.yml):
# pods stay unprivileged but are granted NET_ADMIN and the specific
# hostPath prefixes flannel needs for CNI configuration.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  # Only the host paths flannel writes CNI config / runtime state to.
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities: NET_ADMIN is required for flannel to manage interfaces/routes.
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false


kubectl apply -f kube-flannel.yml

发现报错plugin flannel does not support config version ,修改配置文件

vim /etc/cni/net.d/10-flannel.conflist
//加上cni的版本号
//文件内容如下
{
  "name": "cbr0",
  "cniVersion": "0.2.0",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}

然后执行
systemctl daemon-reload

 

4.6查看节点

kubectl get nodes

4.7Java连接

    <!-- Maven 依赖(原文转载时 XML 标签丢失,此处还原) -->
    <dependencies>
        <dependency>
            <groupId>io.fabric8</groupId>
            <artifactId>kubernetes-client</artifactId>
            <version>4.9.0</version>
        </dependency>
        <dependency>
            <groupId>io.fabric8</groupId>
            <artifactId>kubernetes-assertions</artifactId>
            <version>4.0.0</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
    /**
     * Minimal fabric8 kubernetes-client demo: point the client at a local
     * kubeconfig, connect to the apiserver, and list namespaces/services.
     */
    public static void main(String[] args) {

        // Override the environment so the client reads this kubeconfig
        // instead of the default ~/.kube/config.
        System.setProperty(Config.KUBERNETES_KUBECONFIG_FILE, "D:\\project\\k8s-demo\\src\\main\\resources\\config");

        // NOTE(review): master URL has no scheme — fabric8 normally expects
        // "https://host:port"; confirm this resolves against your apiserver.
        Config config = new ConfigBuilder().withMasterUrl("10.0.104.152:6443").build();
        KubernetesClient client = new DefaultKubernetesClient(config);
        NamespaceList myNs = client.namespaces().list();

        // All services across every namespace.
        ServiceList myServices = client.services().list();

        // Services restricted to the "default" namespace.
        ServiceList myNsServices = client.services().inNamespace("default").list();

    }

 

你可能感兴趣的:(k8s,k8s)