# OpenShift 3.11 Installation and Deployment
### 1 Environment preparation (all nodes)
OpenShift version: v3.11
1.1 Machine environment

| IP          | CPU (cores) | Memory (GB) | Hostname | OS         |
|-------------|-------------|-------------|----------|------------|
| 172.16.1.91 | 4           | 8           | node01   | CentOS 7.6 |
| 172.16.1.92 | 4           | 8           | node02   | CentOS 7.6 |
| 172.16.1.93 | 4           | 8           | node03   | CentOS 7.6 |
| 172.16.1.94 | 4           | 8           | node04   | CentOS 7.6 |
| 172.16.1.95 | 4           | 8           | node05   | CentOS 7.6 |
1.2 Passwordless SSH login
ssh-keygen
ssh-copy-id 172.16.1.91
ssh-copy-id 172.16.1.92
ssh-copy-id 172.16.1.93
ssh-copy-id 172.16.1.94
ssh-copy-id 172.16.1.95
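The five ssh-copy-id calls above can also be run as a loop; a minimal sketch (run on node01, assuming root login and the same key for every host):
for ip in 172.16.1.9{1..5}; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip   # prompts once per host for the root password
done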
1.3 Hosts resolution
vim /etc/hosts
172.16.1.91 node01
172.16.1.92 node02
172.16.1.93 node03
172.16.1.94 node04
172.16.1.95 node05
---------------------
scp -rp /etc/hosts 172.16.1.92:/etc/hosts
scp -rp /etc/hosts 172.16.1.93:/etc/hosts
scp -rp /etc/hosts 172.16.1.94:/etc/hosts
scp -rp /etc/hosts 172.16.1.95:/etc/hosts
1.4 SELinux and firewall
OpenShift 3.11 requires SELinux to be enabled and enforcing with the targeted policy, so keep the settings below:
#sed -i 's/SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config
#sed -i 's/SELINUXTYPE=.*/SELINUXTYPE=targeted/' /etc/selinux/config
Open port 8443 for the OpenShift API:
/sbin/iptables -I INPUT -p tcp --dport 8443 -j ACCEPT && service iptables save
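To confirm the rule is in place (standard iptables commands):
iptables -nL INPUT | grep 8443   # an ACCEPT rule for tcp dpt:8443 should be listed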
1.5 Install the required packages
yum install -y wget git ntp net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct nfs-utils yum-utils docker NetworkManager
1.6 Other settings
sysctl net.ipv4.ip_forward=1
yum install pyOpenSSL httpd-tools -y
systemctl start NetworkManager
systemctl enable NetworkManager
Configure a registry mirror (accelerator):
echo '{
"insecure-registries": ["172.30.0.0/16"],
"registry-mirrors": ["https://3aexnae3.mirror.aliyuncs.com"]
}' >/etc/docker/daemon.json
systemctl daemon-reload && \
systemctl enable docker && \
systemctl restart docker
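To confirm the daemon picked up the mirror and insecure-registry settings (plain docker CLI):
docker info 2>/dev/null | grep -A 2 -E 'Insecure Registries|Registry Mirrors'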
1.7 Pull the required images
#Master image list (master node)
echo 'docker.io/cockpit/kubernetes
docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-service-catalog
docker.io/openshift/origin-node
docker.io/openshift/origin-deployer
docker.io/openshift/origin-control-plane
docker.io/openshift/origin-control-plane
docker.io/openshift/origin-template-service-broker
docker.io/openshift/origin-pod
docker.io/cockpit/kubernetes
docker.io/openshift/origin-web-console
quay.io/coreos/etcd' >image.txt && \
while read line; do docker pull $line ; done <image.txt
#Node image list (node hosts)
echo 'docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-node
docker.io/openshift/origin-deployer
docker.io/openshift/origin-pod
docker.io/ansibleplaybookbundle/origin-ansible-service-broker
docker.io/openshift/origin-docker-registry' >image.txt && \
while read line; do docker pull $line ; done <image.txt
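A quick check that the pulls succeeded on each host:
docker images | grep -E 'openshift|cockpit|etcd|ansible-service-broker'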
### 2 Configure Ansible (master node)
2.1 Download the openshift-ansible code
git clone -b release-3.11 https://github.com/openshift/openshift-ansible.git
Ansible 2.6.5 is required; either install it from the repos (yum install ansible) or download the rpm:
//wget https://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin311/ansible-2.6.5-1.el7.noarch.rpm && \
//yum localinstall ansible-2.6.5-1.el7.noarch.rpm -y
//Optional: standalone etcd
//yum install -y etcd && \
//systemctl enable etcd && \
//systemctl start etcd
2.2 Configuration file
Example inventories: https://docs.okd.io/3.11/install/example_inventories.html
[root@master ~]# cat /etc/ansible/hosts
[all]
# all machine node names go under [all]
node01
node02
node03
node04
node05

[OSEv3:children]
# the OpenShift roles used here: masters, nodes, etcd
masters
nodes
etcd

[OSEv3:vars]
# OpenShift installation parameters
# ansible connects over ssh as root
ansible_ssh_user=root
# deployment type is origin
openshift_deployment_type=origin
# release 3.11
openshift_release=3.11
openshift_clock_enabled=true
openshift_enable_service_catalog=false
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability

[masters]
# hosts with the master role
node01

[etcd]
# hosts with the etcd role
node01

[nodes]
# hosts with the node role
#master openshift_node_group_name='node-config-all-in-one'
#node01 openshift_node_group_name='node-config-compute'
#node02 openshift_node_group_name='node-config-compute'
node01 openshift_node_group_name='node-config-master'
node02 openshift_node_group_name='node-config-compute'
node03 openshift_node_group_name='node-config-compute'
node04 openshift_node_group_name='node-config-infra'
node05 openshift_node_group_name='node-config-infra'
#gluster[1:6].example.com openshift_node_group_name='node-config-compute-storage'

# Optional NFS-backed registry storage (move under [OSEv3:vars] if enabled)
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_nfs_directory=/data/docker
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=20Gi
# ansible_service_broker_install=false
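Before running the playbooks it is worth confirming Ansible can reach every host in the inventory (standard Ansible ping module, assuming the inventory above is saved as /etc/ansible/hosts):
ansible all -m ping
ansible OSEv3 -m ping   # only the hosts in the OpenShift groups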
### 3 Install with Ansible
#Pre-install checks
ansible-playbook ~/openshift-ansible/playbooks/prerequisites.yml
#Install
ansible-playbook ~/openshift-ansible/playbooks/deploy_cluster.yml
#Install the openshift-web-console
ansible-playbook ~/openshift-ansible/playbooks/openshift-web-console/config.yml
#To reinstall, run the uninstall playbook first
ansible-playbook ~/openshift-ansible/playbooks/adhoc/uninstall.yml
The individual component playbooks and the order in which they run are listed in Table 1 at the end of this document.
### 4 Post-installation configuration (master node)
4.1 Configure NFS persistent volumes
yum install nfs-utils rpcbind -y
mkdir -p /data/v0{01..20} /data/{docker,volume,registry}
chmod -R 777 /data
vim /etc/exports
/data 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v001 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v002 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v003 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v004 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v005 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v006 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v007 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v008 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v009 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v010 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/docker *(rw,sync,no_all_squash,no_root_squash)
systemctl restart rpcbind && \
systemctl restart nfs && \
systemctl enable rpcbind && \
systemctl enable nfs
exportfs -rv #re-export everything in /etc/exports
exportfs     #check which directories the NFS server is sharing
rpcinfo -p   #confirm the NFS services registered with rpcbind
exportfs -v  #show the exports with their options
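Optionally, verify the exports are reachable from another node (showmount ships with nfs-utils; /mnt is just a scratch mount point):
showmount -e 172.16.1.91
mount -t nfs 172.16.1.91:/data/v001 /mnt && df -h /mnt && umount /mnt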
#Create the persistent volumes (pv-01-10.yml is listed in section 6.1 at the end of this document)
kubectl apply -f pv-01-10.yml
4.2 Create OpenShift users
oc login -u system:admin ##log in as the system administrator
htpasswd -b /etc/origin/master/htpasswd admin 123456 ##create the admin user
htpasswd -b /etc/origin/master/htpasswd dev dev ##create the dev user
oc login -u admin ##log in as the new user
oc logout ##log out of the current session
4.3 Grant the created user the cluster-admin role
oc login -u system:admin &&\
oc adm policy add-cluster-role-to-user cluster-admin admin
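To confirm the binding took effect, log back in as admin and run a cluster-scoped query (any cluster-wide read will do):
oc login -u admin
oc get nodes   # fails with a permissions error if cluster-admin was not granted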
4.4 Access test
Add these hosts entries on your local computer:
172.16.1.91 node01
172.16.1.92 node02
172.16.1.93 node03
172.16.1.94 node04
172.16.1.95 node05
https://node01:8443  admin/123456 (the credentials are the user created above)
### 5 Other configuration
5.1 Deploy Cockpit for cluster node management
yum install -y cockpit cockpit-docker cockpit-kubernetes &&\
systemctl start cockpit &&\
systemctl enable cockpit.socket &&\
iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 9090 -j ACCEPT
Cockpit is then reachable at https://172.16.1.91:9090 (log in with the machine's ssh account and password).
5.2 Command completion
#kubectl completion
mkdir -p /usr/share/bash-completion/kubernetes
kubectl completion bash >/usr/share/bash-completion/kubernetes/bash_completion
echo 'source /usr/share/bash-completion/kubernetes/bash_completion' >>~/.bash_profile
#oc completion
mkdir -p /usr/share/bash-completion/openshift
oc completion bash >/usr/share/bash-completion/openshift/bash_completion
echo "source /usr/share/bash-completion/openshift/bash_completion" >> ~/.bash_profile
source ~/.bash_profile
5.3 OpenShift login
#log in to OpenShift (e.g. with the dev user created above: username dev, password dev)
oc login -n openshift
#show the logged-in user's token
oc whoami -t
#log in to the internal docker registry
docker login -u admin -p `oc whoami -t` docker-registry.default.svc:5000
#find the ClusterIP of the docker-registry service and add it to each host's /etc/hosts so the registry name resolves
oc get svc -n default|grep docker-registry|awk '{print $3}'
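A minimal sketch of that hosts entry (assumes the default service name docker-registry.default.svc; the REGISTRY_IP variable is just a local shell variable; run on every host):
REGISTRY_IP=$(oc get svc docker-registry -n default -o jsonpath='{.spec.clusterIP}')
echo "$REGISTRY_IP docker-registry.default.svc" >> /etc/hosts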
5.4 Common command-line operations
#master-restart api         ###restart the master API static pod (run on the master)
#master-restart controllers ###restart the master controllers static pod
oc whoami -t ###show the current user's token
oc login https://node01:8443 --token=`oc whoami -t` ###log in with the user's token
oc get nodes ###check the status of the cluster nodes
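A few more everyday read-only commands (all standard oc calls):
oc get pods --all-namespaces ###list pods across all projects
oc get pv                    ###list persistent volumes and their binding status
oc status                    ###overview of the current project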
### 6 Other
6.1 The pv-01-10.yml file
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv001
  labels:
    name: pv001
    type: nfs
spec:
  nfs:
    path: /data/v001
    server: 172.16.1.91
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv002
  labels:
    name: nfs-pv002
    type: nfs
spec:
  nfs:
    path: /data/v002
    server: 172.16.1.91
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv003
  labels:
    name: nfs-pv003
    type: nfs
spec:
  nfs:
    path: /data/v003
    server: 172.16.1.91
  capacity:
    storage: 30Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv004
  labels:
    name: nfs-pv004
    type: nfs
spec:
  nfs:
    path: /data/v004
    server: 172.16.1.91
  capacity:
    storage: 30Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv005
  labels:
    name: nfs-pv005
    type: nfs
spec:
  nfs:
    path: /data/v005
    server: 172.16.1.91
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv006
  labels:
    name: nfs-pv006
    type: nfs
spec:
  nfs:
    path: /data/v006
    server: 172.16.1.91
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv007
  labels:
    name: nfs-pv007
    type: nfs
spec:
  nfs:
    path: /data/v007
    server: 172.16.1.91
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv008
  labels:
    name: nfs-pv008
    type: nfs
spec:
  nfs:
    path: /data/v008
    server: 172.16.1.91
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv009
  labels:
    name: nfs-pv009
    type: nfs
spec:
  nfs:
    path: /data/v009
    server: 172.16.1.91
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv010
  labels:
    name: nfs-pv010
    type: nfs
spec:
  nfs:
    path: /data/v010
    server: 172.16.1.91
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
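For reference, a claim that would bind to one of the volumes above could look like this (a sketch only; the claim name is arbitrary and the selector matches the label set on nfs-pv001):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim        # hypothetical name, choose your own
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  selector:
    matchLabels:
      name: pv001            # matches the label on nfs-pv001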
===================================================================================================
# Install Docker 1.13.1 on CentOS 7.6
#### Remove old versions:
yum remove docker docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
#### Install some required system tools:
yum install -y yum-utils device-mapper-persistent-data lvm2
#### Add the repository information (optional)
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#### Refresh the yum cache (optional)
yum makecache fast
#### Install Docker 1.13.1
yum -y install docker
#### Change Docker's default storage path on CentOS 7
mkdir -p /opt/docker/data
vim /usr/lib/systemd/system/docker.service
#### Find the following line, insert or modify the -g option (also written --graph or --data-root), then save
ExecStart=/usr/bin/dockerd-current \
  -g /opt/docker/data
#### Reload systemd after the change
systemctl daemon-reload
#### Start the Docker service and enable it at boot
systemctl enable docker && systemctl start docker
#### Test by running hello-world
docker run hello-world
#### Registry mirror: edit /etc/docker/daemon.json:
{
"registry-mirrors": ["http://hub-mirror.c.163.com"]
}
#### Remove Docker CE
yum remove docker-ce
rm -rf /var/lib/docker
Reference: https://www.runoob.com/docker/centos-docker-install.html
#### Docker: set the container IP range and DNS
# Note: default-address-pools defines the default address range. If you set a /24 such as "5.5.5.0/24", "docker network create mynet1" will fail, because no further /24 subnet can be allocated from it.
# With a /16 such as "5.5.0.0/16", the first address assigned on the default network is 5.5.0.1; after "docker network create mynet1 && docker network inspect mynet1"
# you will see that mynet1 is given the range 5.5.1.0/24, and the first address assigned via "--net mynet1" is 5.5.1.1.
echo -e '{
"authorization-plugins": [],
"dns":["172.16.250.15","168.63.129.16","8.8.8.8"],
"default-address-pools":[{"base":"5.5.0.0/16","size":24}]
}
' > /etc/docker/daemon.json
systemctl restart docker
# Create a custom network. Note: containers attached to a custom network get a default /etc/resolv.conf and do not pick up the DNS servers set in the host's /etc/docker/daemon.json; you would have to configure DNS manually, so avoid custom networks unless you need them.
docker network create --subnet=5.5.5.0/24 mynetwork1 --gateway=5.5.5.1
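To see which subnet and gateway the custom network actually received (plain docker CLI):
docker network inspect mynetwork1 --format '{{range .IPAM.Config}}{{.Subnet}} {{.Gateway}}{{end}}'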
https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
#### Fix containers being unable to reach ports on the host
# Allow traffic from the container address pool (the 5.5.0.0/16 base configured above) through firewalld:
firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=5.5.0.0/16 accept' && firewall-cmd --reload
The following table lists the playbooks in the order that they must run:

Table 1. Individual Component Playbook Run Order

| Playbook Name | File Location |
|---------------|---------------|
| Health Check | ~/openshift-ansible/playbooks/openshift-checks/pre-install.yml |
| Node Bootstrap | ~/openshift-ansible/playbooks/openshift-node/bootstrap.yml |
| etcd Install | ~/openshift-ansible/playbooks/openshift-etcd/config.yml |
| NFS Install | ~/openshift-ansible/playbooks/openshift-nfs/config.yml |
| Load Balancer Install | ~/openshift-ansible/playbooks/openshift-loadbalancer/config.yml |
| Master Install | ~/openshift-ansible/playbooks/openshift-master/config.yml |
| Master Additional Install | ~/openshift-ansible/playbooks/openshift-master/additional_config.yml |
| Node Join | ~/openshift-ansible/playbooks/openshift-node/join.yml |
| GlusterFS Install | ~/openshift-ansible/playbooks/openshift-glusterfs/config.yml |
| Hosted Install | ~/openshift-ansible/playbooks/openshift-hosted/config.yml |
| Monitoring Install | ~/openshift-ansible/playbooks/openshift-monitoring/config.yml |
| Web Console Install | ~/openshift-ansible/playbooks/openshift-web-console/config.yml |
| Admin Console Install | ~/openshift-ansible/playbooks/openshift-console/config.yml |
| Metrics Install | ~/openshift-ansible/playbooks/openshift-metrics/config.yml |
| metrics-server | ~/openshift-ansible/playbooks/metrics-server/config.yml |
| Logging Install | ~/openshift-ansible/playbooks/openshift-logging/config.yml |
| Availability Monitoring Install | ~/openshift-ansible/playbooks/openshift-monitor-availability/config.yml |
| Service Catalog Install | ~/openshift-ansible/playbooks/openshift-service-catalog/config.yml |
| Management Install | ~/openshift-ansible/playbooks/openshift-management/config.yml |
| Descheduler Install | ~/openshift-ansible/playbooks/openshift-descheduler/config.yml |
| Node Problem Detector Install | ~/openshift-ansible/playbooks/openshift-node-problem-detector/config.yml |
| Autoheal Install | ~/openshift-ansible/playbooks/openshift-autoheal/config.yml |
| Operator Lifecycle Manager (OLM) Install (Technology Preview) | ~/openshift-ansible/playbooks/olm/config.yml |