Installing Docker, k8s, and prometheus-operator in an Offline Environment: k8s Installation

Part 4. k8s Installation

(Files shared with earlier parts are not repeated here; see Parts 1 and 2.)

1. k8s master installation

Note: the master installation role has not been tested much; it simply collects all the steps into a role, so be careful before running it as-is.
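
A low-risk way to vet the role first is a dry run with Ansible's check mode. This is only a sketch, assuming the utils_install.yml entry playbook invoked in section 1.6; note that shell tasks are skipped rather than simulated in check mode, so it mainly validates inventory, variables, and file/template tasks:

ansible-playbook utils_install.yml -i environments/colony/inventory --check --diff -e "hosts=<hosts> user_name=<user> env=colony ansibleHome=/home/jenkins/ansible_workspace util=k8s_master_install"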

1.1 tree /home/jenkins/ansible_workspace
/home/jenkins/ansible_workspace/
├── environments
│   └── colony
│       ├── inventory
│       └── vars.yml
└── roles
    └── k8s_master_install
        ├── files
        │   └── update-kubeadm-cert.sh
        ├── meta
        │   └── main.yml
        └── tasks
            └── main.yml
1.2 cat /home/jenkins/ansible_workspace/roles/k8s_master_install/files/update-kubeadm-cert.sh
#!/bin/bash

set -o errexit
set -o pipefail
# set -o xtrace

log::err() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$@\n"
}

log::info() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[32mINFO: \033[0m$@\n"
}

log::warning() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[33mWARNING: \033[0m$@\n"
}

check_file() {
  if [[ ! -r  ${1} ]]; then
    log::err "can not find ${1}"
    exit 1
  fi
}

# get x509v3 subject alternative name from the old certificate
cert::get_subject_alt_name() {
  local cert=${1}.crt
  check_file "${cert}"
  local alt_name=$(openssl x509 -text -noout -in ${cert} | grep -A1 'Alternative' | tail -n1 | sed 's/[[:space:]]*Address//g')
  printf "${alt_name}\n"
}

# get subject from the old certificate
cert::get_subj() {
  local cert=${1}.crt
  check_file "${cert}"
  local subj=$(openssl x509 -text -noout -in ${cert}  | grep "Subject:" | sed 's/Subject:/\//g;s/\,/\//;s/[[:space:]]//g')
  printf "${subj}\n"
}

cert::backup_file() {
  local file=${1}
  if [[ ! -e ${file}.old-$(date +%Y%m%d) ]]; then
    cp -rp ${file} ${file}.old-$(date +%Y%m%d)
    log::info "backup ${file} to ${file}.old-$(date +%Y%m%d)"
  else
    log::warning "does not backup, ${file}.old-$(date +%Y%m%d) already exists"
  fi
}

# generate certificate with client, server or peer type
# Args:
#   $1 (the name of certificate)
#   $2 (the type of certificate, must be one of client, server, peer)
#   $3 (the subject of certificates)
#   $4 (the validity of certificates) (days)
#   $5 (the x509v3 subject alternative name of certificate when the type of certificate is server or peer)
cert::gen_cert() {
  local cert_name=${1}
  local cert_type=${2}
  local subj=${3}
  local cert_days=${4}
  local alt_name=${5}
  local cert=${cert_name}.crt
  local key=${cert_name}.key
  local csr=${cert_name}.csr
  local csr_conf="distinguished_name = dn\n[dn]\n[v3_ext]\nkeyUsage = critical, digitalSignature, keyEncipherment\n"

  check_file "${key}"
  check_file "${cert}"

  # backup certificate when certificate not in ${kubeconf_arr[@]}
  # kubeconf_arr=("controller-manager.crt" "scheduler.crt" "admin.crt" "kubelet.crt")
  # if [[ ! "${kubeconf_arr[@]}" =~ "${cert##*/}" ]]; then
  #   cert::backup_file "${cert}"
  # fi

  case "${cert_type}" in
    client)
      openssl req -new  -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    server)
      openssl req -new  -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    peer)
      openssl req -new  -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    *)
      log::err "unknow, unsupported etcd certs type: ${cert_type}, supported type: client, server, peer"
      exit 1
  esac

  rm -f ${csr}
}

cert::update_kubeconf() {
  local cert_name=${1}
  local kubeconf_file=${cert_name}.conf
  local cert=${cert_name}.crt
  local key=${cert_name}.key

  # generate certificate
  check_file ${kubeconf_file}
  # get the key from the old kubeconf
  grep "client-key-data" ${kubeconf_file} | awk '{print $2}' | base64 -d > ${key}
  # get the old certificate from the old kubeconf
  grep "client-certificate-data" ${kubeconf_file} | awk '{print $2}' | base64 -d > ${cert}
  # get subject from the old certificate
  local subj=$(cert::get_subj ${cert_name})
  cert::gen_cert "${cert_name}" "client" "${subj}" "${CAER_DAYS}"
  # get certificate base64 code
  local cert_base64=$(base64 -w 0 ${cert})

  # backup kubeconf
  # cert::backup_file "${kubeconf_file}"

  # set certificate base64 code to kubeconf
  # use '|' as the sed delimiter: standard base64 output can contain '/',
  # which would otherwise terminate the s/// expression early
  sed -i "s|client-certificate-data:.*|client-certificate-data: ${cert_base64}|g" ${kubeconf_file}

  log::info "generated new ${kubeconf_file}"
  rm -f ${cert}
  rm -f ${key}

  # set config for kubectl
  if [[ ${cert_name##*/} == "admin" ]]; then
    mkdir -p ${HOME}/.kube
    local config=${HOME}/.kube/config
    local config_backup=${HOME}/.kube/config.old-$(date +%Y%m%d)
    if [[ -f ${config} ]] && [[ ! -f ${config_backup} ]]; then
      cp -fp ${config} ${config_backup}
      log::info "backup ${config} to ${config_backup}"
    fi
    cp -fp ${kubeconf_file} ${HOME}/.kube/config
    log::info "copy the admin.conf to ${HOME}/.kube/config for kubectl"
  fi
}

cert::update_etcd_cert() {
  PKI_PATH=${KUBE_PATH}/pki/etcd
  CA_CERT=${PKI_PATH}/ca.crt
  CA_KEY=${PKI_PATH}/ca.key

  check_file "${CA_CERT}"
  check_file "${CA_KEY}"

  # generate etcd server certificate
  # /etc/kubernetes/pki/etcd/server
  CERT_NAME=${PKI_PATH}/server
  subject_alt_name=$(cert::get_subject_alt_name ${CERT_NAME})
  cert::gen_cert "${CERT_NAME}" "peer" "/CN=etcd-server" "${CERT_DAYS}" "${subject_alt_name}"

  # generate etcd peer certificate
  # /etc/kubernetes/pki/etcd/peer
  CERT_NAME=${PKI_PATH}/peer
  subject_alt_name=$(cert::get_subject_alt_name ${CERT_NAME})
  cert::gen_cert "${CERT_NAME}" "peer" "/CN=etcd-peer" "${CERT_DAYS}" "${subject_alt_name}"

  # generate etcd healthcheck-client certificate
  # /etc/kubernetes/pki/etcd/healthcheck-client
  CERT_NAME=${PKI_PATH}/healthcheck-client
  cert::gen_cert "${CERT_NAME}" "client" "/O=system:masters/CN=kube-etcd-healthcheck-client" "${CERT_DAYS}"

  # generate apiserver-etcd-client certificate
  # /etc/kubernetes/pki/apiserver-etcd-client
  # note: apiserver-etcd-client is signed by the etcd CA, so CA_CERT/CA_KEY stay unchanged here
  check_file "${CA_CERT}"
  check_file "${CA_KEY}"
  PKI_PATH=${KUBE_PATH}/pki
  CERT_NAME=${PKI_PATH}/apiserver-etcd-client
  cert::gen_cert "${CERT_NAME}" "client" "/O=system:masters/CN=kube-apiserver-etcd-client" "${CERT_DAYS}"

  # restart etcd
  docker ps | awk '/k8s_etcd/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted etcd"
}

cert::update_master_cert() {
  PKI_PATH=${KUBE_PATH}/pki
  CA_CERT=${PKI_PATH}/ca.crt
  CA_KEY=${PKI_PATH}/ca.key

  check_file "${CA_CERT}"
  check_file "${CA_KEY}"

  # generate apiserver server certificate
  # /etc/kubernetes/pki/apiserver
  CERT_NAME=${PKI_PATH}/apiserver
  subject_alt_name=$(cert::get_subject_alt_name ${CERT_NAME})
  cert::gen_cert "${CERT_NAME}" "server" "/CN=kube-apiserver" "${CERT_DAYS}" "${subject_alt_name}"

  # generate apiserver-kubelet-client certificate
  # /etc/kubernetes/pki/apiserver-kubelet-client
  CERT_NAME=${PKI_PATH}/apiserver-kubelet-client
  cert::gen_cert "${CERT_NAME}" "client" "/O=system:masters/CN=kube-apiserver-kubelet-client" "${CERT_DAYS}"

  # generate kubeconf for controller-manager, scheduler, kubectl and kubelet
  # /etc/kubernetes/{controller-manager,scheduler,admin,kubelet}.conf
  cert::update_kubeconf "${KUBE_PATH}/controller-manager"
  cert::update_kubeconf "${KUBE_PATH}/scheduler"
  cert::update_kubeconf "${KUBE_PATH}/admin"
  # check kubelet.conf
  # https://github.com/kubernetes/kubeadm/issues/1753
  set +e
  grep kubelet-client-current.pem /etc/kubernetes/kubelet.conf > /dev/null 2>&1
  kubelet_cert_auto_update=$?
  set -e
  if [[ "$kubelet_cert_auto_update" == "0" ]]; then
    log::warning "does not need to update kubelet.conf"
  else
    cert::update_kubeconf "${KUBE_PATH}/kubelet"
  fi

  # generate front-proxy-client certificate
  # use front-proxy-client ca
  CA_CERT=${PKI_PATH}/front-proxy-ca.crt
  CA_KEY=${PKI_PATH}/front-proxy-ca.key
  check_file "${CA_CERT}"
  check_file "${CA_KEY}"
  CERT_NAME=${PKI_PATH}/front-proxy-client
  cert::gen_cert "${CERT_NAME}" "client" "/CN=front-proxy-client" "${CERT_DAYS}"

  # restart apiserver, controller-manager, scheduler and kubelet
  docker ps | awk '/k8s_kube-apiserver/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-apiserver"
  docker ps | awk '/k8s_kube-controller-manager/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-controller-manager"
  docker ps | awk '/k8s_kube-scheduler/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-scheduler"
  systemctl restart kubelet
  log::info "restarted kubelet"
}

main() {
  local node_type=$1

  KUBE_PATH=/etc/kubernetes
  CERT_DAYS=36500

  case ${node_type} in
    # etcd)
    #   # update etcd certificates
    #   cert::update_etcd_cert
    # ;;
    master)
      # backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
      cert::backup_file "${KUBE_PATH}"
      # update master certificates and kubeconf
      cert::update_master_cert
    ;;
    all)
      # backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
      cert::backup_file "${KUBE_PATH}"
      # update etcd certificates
      cert::update_etcd_cert
      # update master certificates and kubeconf
      cert::update_master_cert
    ;;
    *)
      log::err "unknown, unsupported certs type: ${node_type}, supported type: all, master"
      printf "Documentation: https://github.com/yuyicai/update-kube-cert
  example:
    '\033[32m./update-kubeadm-cert.sh all\033[0m' update all etcd certificates, master certificates and kubeconf
      /etc/kubernetes
      ├── admin.conf
      ├── controller-manager.conf
      ├── scheduler.conf
      ├── kubelet.conf
      └── pki
          ├── apiserver.crt
          ├── apiserver-etcd-client.crt
          ├── apiserver-kubelet-client.crt
          ├── front-proxy-client.crt
          └── etcd
              ├── healthcheck-client.crt
              ├── peer.crt
              └── server.crt

    '\033[32m./update-kubeadm-cert.sh master\033[0m' update only master certificates and kubeconf
      /etc/kubernetes
      ├── admin.conf
      ├── controller-manager.conf
      ├── scheduler.conf
      ├── kubelet.conf
      └── pki
          ├── apiserver.crt
          ├── apiserver-kubelet-client.crt
          └── front-proxy-client.crt
"
      exit 1
    esac
}

main "$@"


1.3 cat /home/jenkins/ansible_workspace/roles/k8s_master_install/meta/main.yml
---
dependencies:
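  # dependencies listed here run before this role's own tasks,
  # so Docker and the k8s system prep are always applied first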
  - role: docker_install
  - role: k8s_system_init
1.4 cat /home/jenkins/ansible_workspace/roles/k8s_master_install/tasks/main.yml
---
- name: check whether k8s 1.23.5 is already installed
  shell: yum list installed | grep kubectl.x86_64 | grep 1.23.5 > /dev/null
  ignore_errors: True
  register: k8s_master
  tags: judge

- name: install k8s rpms
  shell: yum clean all && yum install -y kubelet-1.23.5 kubeadm-1.23.5 kubectl-1.23.5
  tags: k8s_install
  when: k8s_master is failed

- name: set kubectl bash completion
  lineinfile:
    path: /root/.bashrc
    regexp: '^source /usr/share/bash-completion/bash_completion$'
    line: 'source /usr/share/bash-completion/bash_completion'
    owner: root
    group: root
    mode: 0644
  tags: change_bashrc
  when: k8s_master is failed

- name: add kubectl bash completion to bashrc
  lineinfile:
    path: /root/.bashrc
    regexp: '^source <(kubectl completion bash)$'
    line: 'source <(kubectl completion bash)'
    owner: root
    group: root
    mode: 0644
  tags: change_bashrc
  when: k8s_master is failed

- name: set bridge-nf-call-iptables to 1
  shell: "echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables"
  tags: change_bridge
  when: k8s_master is failed

- name: remove stale bridge-nf-call-iptables entries from /etc/profile
  shell: sed -i '/bridge-nf-call-iptables/d' /etc/profile
  tags: change_bridge
  when: k8s_master is failed

- name: persist bridge-nf-call-iptables setting via /etc/profile
  shell: echo 'echo 1 >/proc/sys/net/bridge/bridge-nf-call-iptables' >> /etc/profile
  tags: change_bridge
  when: k8s_master is failed

# configure /etc/hosts resolution
- name: configure /etc/hosts for k8s master
  shell: sed -i '/{{ ansible_host }}/d' /etc/hosts && hostname=`hostname` && echo {{ ansible_host }} $hostname >> /etc/hosts
  tags:
    - k8s
  when: k8s_master is failed

# initialize the cluster on the master node
- name: create dir
  file: name=/home/kubernetes/k8s/cfg/dashboard state=directory owner=root group=root
  tags: create_dir
  when: k8s_master is failed

- name: init k8s master
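  # --image-repository points kubeadm at the offline registry 1.1.1.1:5000
  # instead of the default k8s.gcr.io, so control-plane images come from it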
  shell: "kubeadm init --apiserver-advertise-address={{ ansible_host }} --apiserver-bind-port=6443 --pod-network-cidr=10.244.0.0/16  --service-cidr=10.96.0.0/12 --kubernetes-version=1.23.5 --image-repository 1.1.1.1:5000 >/home/kubernetes/k8s/cfg/init.log"
  tags: init_master
  when: k8s_master is failed

- name: config kubectl config
  shell: "rm -rf /root/.kube && mkdir -p /root/.kube && cp -if /etc/kubernetes/admin.conf /root/.kube/config"
  tags: kubectl_config
  when: k8s_master is failed

- name: token_create
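  # --ttl 0 creates a bootstrap token that never expires; it becomes the
  # --token value of the kubeadm join command used in section 2.5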
  shell: kubeadm token create --ttl 0
  tags: token_create
  when: k8s_master is failed

- name: ca_create
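  # computes the sha256 hash of the cluster CA public key, i.e. the
  # --discovery-token-ca-cert-hash value for kubeadm join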
  shell: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed  's/^.* //'
  tags: ca_create
  when: k8s_master is failed

- name: copy flannel manifest
  template: src=kube-flannel.yml dest=/home/kubernetes/k8s/cfg
  tags: cp_flannel
  when: k8s_master is failed

- name: apply flannel manifest
  shell: cd /home/kubernetes/k8s/cfg && kubectl apply -f kube-flannel.yml
  tags: flannel_create
  when: k8s_master is failed

- name: copy update k8s certs script
  copy: src=update-kubeadm-cert.sh dest=/home/kubernetes/k8s/script/update-kubeadm-cert.sh mode=0755
  tags: cp_script
  when: k8s_master is failed

- name: update k8s certs to 36500 days
  shell: "/home/kubernetes/k8s/script/update-kubeadm-cert.sh all"
  tags: update_certs
  when: k8s_master is failed
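
The token_create and ca_create tasks above yield the two values that make up the node join command used in section 2.5; on the master, kubeadm can also print the fully assembled command in one step:

kubeadm token create --ttl 0 --print-join-command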

1.5 Jenkins job parameters
(screenshot omitted)
1.6 Jenkins job shell build step
ansibleHome='/home/jenkins/ansible_workspace'
cd ${ansibleHome}
ansible-playbook utils_install.yml  -i environments/${environment}/inventory -e "hosts=${hosts} user_name=${user_name} env=${environment} ansibleHome=${ansibleHome} util=k8s_master_install"
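
The kubeadm init task in 1.4 redirects its output, including the originally generated join command, to /home/kubernetes/k8s/cfg/init.log on the master, so that line can be recovered later with something like:

ssh root@<master_ip> "tail -n 20 /home/kubernetes/k8s/cfg/init.log"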

2. k8s node installation

2.1 tree /home/jenkins/ansible_workspace
/home/jenkins/ansible_workspace/
├── environments
│   └── colony
│       ├── inventory
│       └── vars.yml
└── roles
    └── k8s_node_install
        ├── meta
        │   └── main.yml
        └── tasks
            └── main.yml
2.2 cat /home/jenkins/ansible_workspace/roles/k8s_node_install/meta/main.yml
---
dependencies:
  - role: docker_install
  - role: k8s_system_init
2.3 cat /home/jenkins/ansible_workspace/roles/k8s_node_install/tasks/main.yml
- name: check whether k8s 1.23.5 is already installed
  shell: yum list installed | grep kubectl.x86_64 | grep 1.23.5 > /dev/null
  ignore_errors: True
  register: k8s_node
  tags: judge

- name: install k8s rpms
  shell: yum clean all && yum install -y kubelet-1.23.5 kubeadm-1.23.5 kubectl-1.23.5
  tags: k8s_install
  when: k8s_node is failed

- name: set kubectl bash completion
  lineinfile:
    path: /root/.bashrc
    regexp: '^source /usr/share/bash-completion/bash_completion$'
    line: 'source /usr/share/bash-completion/bash_completion'
    owner: root
    group: root
    mode: 0644
  tags: change_bashrc
  when: k8s_node is failed

- name: add kubectl bash completion to bashrc
  lineinfile:
    path: /root/.bashrc
    regexp: '^source <(kubectl completion bash)$'
    line: 'source <(kubectl completion bash)'
    owner: root
    group: root
    mode: 0644
  tags: change_bashrc
  when: k8s_node is failed

- name: set bridge-nf-call-iptables to 1
  shell: "echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables"
  tags: change_bridge
  when: k8s_node is failed

- name: remove stale bridge-nf-call-iptables entries from /etc/profile
  shell: sed -i '/bridge-nf-call-iptables/d' /etc/profile
  tags: change_bridge
  when: k8s_node is failed

- name: persist bridge-nf-call-iptables setting via /etc/profile
  shell: echo 'echo 1 >/proc/sys/net/bridge/bridge-nf-call-iptables' >> /etc/profile
  tags: change_bridge
  when: k8s_node is failed

- name: enable kubelet service
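  # kubelet will keep restarting until kubeadm join supplies its
  # configuration; that is expected on a node that has not joined yet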
  service: name=kubelet enabled=yes daemon_reload=yes
  tags: enable_kubelet
  when: k8s_node is failed

- name: pull images flannel
  shell: docker pull 1.1.1.1:5000/mirrored-flannelcni-flannel:v0.19.1
  when: k8s_node is failed

- name: pull images flannel-cni-plugin
  shell: docker pull 1.1.1.1:5000/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
  when: k8s_node is failed

- name: pull images proxy
  shell: docker pull 1.1.1.1:5000/kube-proxy:v1.23.5
  when: k8s_node is failed

- name: pull images apiserver
  shell: docker pull 1.1.1.1:5000/kube-apiserver:v1.23.5
  when: k8s_node is failed

- name: pull images manager
  shell: docker pull 1.1.1.1:5000/kube-controller-manager:v1.23.5
  when: k8s_node is failed

- name: pull images scheduler
  shell: docker pull 1.1.1.1:5000/kube-scheduler:v1.23.5
  when: k8s_node is failed

- name: pull images coredns
  shell: docker pull 1.1.1.1:5000/coredns:v1.8.6
  when: k8s_node is failed

- name: pull images pause
  shell: docker pull 1.1.1.1:5000/pause:3.6
  when: k8s_node is failed

- name: pull images etcd
  shell: docker pull 1.1.1.1:5000/etcd:3.5.1-0
  when: k8s_node is failed
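
Before joining, it is worth a quick manual check that every image landed on the node:

docker images | grep '1.1.1.1:5000'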
2.4 Jenkins job parameters
(screenshot omitted)
2.5 Jenkins job shell build step
ansibleHome="/home/jenkins/ansible_workspace"
master_ip=`echo $master`
master_name=`ssh root@$master_ip hostname`
hosts=`echo $hosts`

#update /etc/hosts
for host in `echo $hosts|sed 's/,/ /g'`; do
    node_name=`ssh root@$host hostname`
    ssh root@${master_ip} -n sed -i "/$master_ip/d" /etc/hosts
    ssh root@${master_ip} -n sed -i "/$host/d" /etc/hosts
    ssh root@${master_ip} -n "echo $host $node_name >> /etc/hosts"
    ssh root@${master_ip} -n "echo $master_ip $master_name >> /etc/hosts"
    ssh root@${host} -n sed -i "/$master_ip/d" /etc/hosts
    ssh root@${host} -n sed -i "/$host/d" /etc/hosts
    ssh root@${host} -n "echo $host $node_name >> /etc/hosts"
    ssh root@${host} -n "echo $master_ip $master_name >> /etc/hosts"
done

cd ${ansibleHome}
ansible-playbook utils_install.yml  -i environments/${environment}/inventory -f5 -e "hosts=${hosts} user_name=${user_name} env=${environment} ansibleHome=${ansibleHome} util=k8s_node_install"

# join: the command below was generated during the k8s master installation (token_create / ca_create tasks)
command="kubeadm join 1.1.1.5:6443 --token 87ehkl.7m9bh772ckiqe9nn --discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxx"
for host in `echo $hosts|sed 's/,/ /g'`; do
    ssh root@${host} -n "kubeadm reset -f"
    ssh root@${host} -n "${command}"
done
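
After the join loop, registration can be confirmed from the master; nodes turn Ready once the flannel pods come up on them:

ssh root@${master_ip} "kubectl get nodes -o wide"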
