System installation:
Hardware requirements: in principle any common configuration will work; recommended:
Memory: 4 GB or more
CPU: 4 cores or more
Disk: 100 GB or more
OS image: CentOS-7-x86_64-DVD-1804.iso
Installation package group: Server with GUI
Prepare five servers, install the OS and package group above on each, and configure the IP address and hostname of each as follows (see the sketch after the list):
10.1.123.202 master1.neokylin.com.cn
10.1.123.203 master2.neokylin.com.cn
10.1.123.204 etcd1.neokylin.com.cn
10.1.123.205 node1.neokylin.com.cn
10.1.123.206 node2.neokylin.com.cn
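The hostname step itself is not shown in the original; on CentOS 7 one common way is hostnamectl. A minimal sketch for the first host (repeat on each server with its own name):
[root@master1 ~]# hostnamectl set-hostname master1.neokylin.com.cn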
Configure /etc/hosts on every host:
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.1.123.202 master1.neokylin.com.cn
10.1.123.203 master2.neokylin.com.cn
10.1.123.204 etcd1.neokylin.com.cn
10.1.123.205 node1.neokylin.com.cn
10.1.123.206 node2.neokylin.com.cn
Set up SSH key trust from the master host to every host (run on the master host):
[root@master ~]# ssh-keygen
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub master1.neokylin.com.cn
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub master2.neokylin.com.cn
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub etcd1.neokylin.com.cn
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node1.neokylin.com.cn
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node2.neokylin.com.cn
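As a sanity check (not in the original), passwordless SSH to every host can be verified with a short loop over the hostnames above:
[root@master ~]# for h in master1 master2 etcd1 node1 node2; do ssh "$h.neokylin.com.cn" hostname; done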
Install the yum repository on every host (run on all hosts):
[root@master ~]# yum install centos-release-openshift-origin.noarch
On the master node, install the installer:
[root@master ~]# yum install openshift-ansible
On all nodes, install Docker:
[root@master ~]# yum install docker
Set the registry network range that Docker may access without TLS (run on all hosts):
[root@master ~]# sed -i '/OPTIONS=.*/c\OPTIONS="--log-driver=journald --insecure-registry 172.30.0.0/16"' /etc/sysconfig/docker
[root@master ~]# systemctl restart docker
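After the sed edit, /etc/sysconfig/docker contains the rewritten line (the deterministic result of the command above):
OPTIONS="--log-driver=journald --insecure-registry 172.30.0.0/16"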
Run the batch installation and deployment from the master node.
Configure the Ansible inventory:
[root@master ~]# cat /etc/ansible/hosts
# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups.
# The lb group lets Ansible configure HAProxy as the load balancing solution.
# Comment lb out if your load balancer is pre-configured.
[OSEv3:children]
masters
nodes
etcd
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
ansible_ssh_user=root
openshift_deployment_type=origin
# Uncomment the following to enable htpasswd authentication; defaults to
# DenyAllPasswordIdentityProvider.
#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Native high availability cluster method with optional load balancer.
# If no lb group is defined installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
openshift_enable_service_catalog=false
template_service_broker_install=false
openshift_public_ip=10.1.123.202
# apply updated node defaults
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# enable ntp on masters to ensure proper failover
openshift_clock_enabled=true
# host group for masters
[masters]
master1.neokylin.com.cn
master2.neokylin.com.cn
# host group for etcd
[etcd]
etcd1.neokylin.com.cn
# host group for nodes, includes region info
[nodes]
master[1:2].neokylin.com.cn
node1.neokylin.com.cn openshift_node_labels="{'region': 'primary', 'zone': 'east'}"
node2.neokylin.com.cn openshift_node_labels="{'region': 'primary', 'zone': 'west'}"
# Test connectivity with an Ansible ping:
[root@master1 master]# ansible all -m ping
node2.neokylin.com.cn | SUCCESS => {
"changed": false,
"ping": "pong"
}
node1.neokylin.com.cn | SUCCESS => {
"changed": false,
"ping": "pong"
}
master2.neokylin.com.cn | SUCCESS => {
"changed": false,
"ping": "pong"
}
master1.neokylin.com.cn | SUCCESS => {
"changed": false,
"ping": "pong"
}
etcd1.neokylin.com.cn | SUCCESS => {
"changed": false,
"ping": "pong"
}
# Deploy the environment with Ansible
# ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
PLAY RECAP ***************************************************************************************************************************************************
localhost : ok=11 changed=0 unreachable=0 failed=0
master1.neokylin.com.cn : ok=71 changed=16 unreachable=0 failed=0
master2.neokylin.com.cn : ok=72 changed=16 unreachable=0 failed=0
etcd1.neokylin.com.cn : ok=73 changed=16 unreachable=0 failed=0
node1.neokylin.com.cn : ok=60 changed=15 unreachable=0 failed=0
node2.neokylin.com.cn : ok=61 changed=15 unreachable=0 failed=0
INSTALLER STATUS *********************************************************************************************************************************************
Initialization : Complete (0:00:25)
# ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
PLAY RECAP ***************************************************************************************************************************************************
etcd1.neokylin.com.cn : ok=76 changed=7 unreachable=0 failed=0
localhost : ok=12 changed=0 unreachable=0 failed=0
master1.neokylin.com.cn : ok=435 changed=67 unreachable=0 failed=0
master2.neokylin.com.cn : ok=271 changed=38 unreachable=0 failed=0
node1.neokylin.com.cn : ok=133 changed=14 unreachable=0 failed=0
node2.neokylin.com.cn : ok=133 changed=14 unreachable=0 failed=0
INSTALLER STATUS *********************************************************************************************************************************************
Initialization : Complete (0:01:07)
Health Check : Complete (0:00:15)
etcd Install : Complete (0:01:05)
Master Install : Complete (0:02:59)
Master Additional Install : Complete (0:00:35)
Node Install : Complete (0:01:42)
Hosted Install : Complete (0:11:55)
Web Console Install : Complete (0:00:46)
Post-installation steps:
[root@master1 master]# oc get nodes
NAME STATUS ROLES AGE VERSION
master1.neokylin.com.cn Ready master 8h v1.9.1+a0ce1bc657
master2.neokylin.com.cn Ready master 8h v1.9.1+a0ce1bc657
node1.neokylin.com.cn Ready 8h v1.9.1+a0ce1bc657
node2.neokylin.com.cn Ready 8h v1.9.1+a0ce1bc657
Log in:
[root@master1 master]# oc login -u system:admin -n default
Logged into "https://master1.neokylin.com.cn:8443" as "system:admin" using existing credentials.
You have access to the following projects and can switch between them with 'oc project <projectname>':
* default
kube-public
kube-system
logging
management-infra
openshift
openshift-infra
openshift-node
openshift-web-console
Using project "default".
These namespaces are not visible in the web console by default; grant the admin user the cluster-admin role as follows:
[root@master ~]# oadm policy add-cluster-role-to-user cluster-admin admin
Web console login:
By default the console is reached via the master hostname. To access it by IP instead, edit the web console ConfigMap and replace the hostname URLs with the IP:
[root@master ~]# oc edit configmap/webconsole-config -n openshift-web-console
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  webconsole-config.yaml: |
    apiVersion: webconsole.config.openshift.io/v1
    clusterInfo:
      consolePublicURL: https://10.1.123.202:8443/console/
      loggingPublicURL: ''
      logoutPublicURL: ''
      masterPublicURL: https://10.1.123.202:8443
      metricsPublicURL: ''
    extensions:
      properties: {}
      scriptURLs: []
      stylesheetURLs: []
    features:
      clusterResourceOverridesEnabled: false
      inactivityTimeoutMinutes: 0
    kind: WebConsoleConfiguration
    servingInfo:
      bindAddress: 0.0.0.0:8443
      bindNetwork: tcp4
      certFile: /var/serving-cert/tls.crt
      clientCA: ''
      keyFile: /var/serving-cert/tls.key
      maxRequestsInFlight: 0
      namedCertificates: null
      requestTimeoutSeconds: 0
kind: ConfigMap
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
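The web console pods read this ConfigMap at startup, so the edit usually does not take effect until they are recreated. One blunt but safe way (an assumption, not shown in the original; the controller recreates the pods automatically):
[root@master1 ~]# oc delete pod --all -n openshift-web-console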
Edit /etc/origin/master/master-config.yaml in the same way: it references the master hostname by default, so replace the hostname with the IP. The relevant parts after editing:
...
masterPublicURL: https://10.1.123.202:8443
networkConfig:
  clusterNetworkCIDR: 10.128.0.0/14
  clusterNetworks:
  - cidr: 10.128.0.0/14
    hostSubnetLength: 9
  externalIPNetworkCIDRs:
  - 0.0.0.0/0
  hostSubnetLength: 9
  networkPluginName: redhat/openshift-ovs-subnet
  serviceNetworkCIDR: 172.30.0.0/16
oauthConfig:
  assetPublicURL: https://10.1.123.202:8443/console/
  grantConfig:
    method: auto
  identityProviders:
  - challenge: true
    login: true
    mappingMethod: claim
    name: allow_all
    provider:
      apiVersion: v1
      kind: AllowAllPasswordIdentityProvider
  masterCA: ca-bundle.crt
  masterPublicURL: https://10.1.123.202:8443
  masterURL: https://10.1.123.202:8443
  sessionConfig:
...
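The original does not show it, but the master services must be restarted before changes to master-config.yaml take effect. On an Origin 3.9 RPM install with native HA the service names are typically the following; run on both masters:
[root@master1 ~]# systemctl restart origin-master-api
[root@master1 ~]# systemctl restart origin-master-controllers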
After this, the console can be reached at https://10.1.123.202:8443/console/ and the login page loads.
The three DeploymentConfigs created by the default installer all have problems (see the pod states below), so we need to recreate them:
[root@master1 master]# oc login -u system:admin -n default
[root@master1 master]# oc get dc
NAME REVISION DESIRED CURRENT TRIGGERED BY
docker-registry 1 0 0 config
registry-console 1 1 0 config
router 7 1 0 config
[root@master1 master]# oc get pods
NAME READY STATUS RESTARTS AGE
docker-registry-1-deploy 0/1 Pending 0 8h
registry-console-1-deploy 0/1 Error 0 8h
router-7-deploy 0/1 Pending 0 1h
Delete these dc and svc objects before recreating them:
[root@master1 master]# oc delete dc docker-registry registry-console router
[root@master1 master]# oc delete svc docker-registry registry-console router
1 Create a new registry:
1) On the etcd host, configure an NFS storage service:
[root@etcd1 ~]# mkdir /openshift-registry
[root@etcd1 ~]# chmod 777 /openshift-registry/ -R
[root@etcd1 /]# cat /etc/exports
/openshift-registry *(rw)
[root@etcd1 ~]# systemctl restart nfs
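As a quick check (not in the original), the export can be verified from any other node; the expected output given the exports file above:
[root@master1 ~]# showmount -e etcd1.neokylin.com.cn
Export list for etcd1.neokylin.com.cn:
/openshift-registry *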
2) Create the Registry:
a. Create the PV. Before the registry can use the storage, a PV must be created; it binds to the NFS export set up above (the server address and path in the manifest are the NFS server and directory configured earlier). registry-pv.yaml:
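The manifest body is missing from the original; a minimal sketch that matches the NFS export configured above and the capacity, access mode, and reclaim policy visible in the oc get pv output below:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-pv
spec:
  capacity:
    storage: 150Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: etcd1.neokylin.com.cn
    path: /openshift-registry
Apply it with:
[root@master1 config]# oc create -f registry-pv.yaml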
[root@master1 config]# oc get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
registry-pv 150Gi RWO Recycle Bound default/registry-pvc 2d
b. Create the PVC. After the PV exists, the Registry can only consume it through a PVC, so create one. registry-pvc.yaml:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 150Gi
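The create step is not shown in the original; presumably the claim is applied in the default project with something like:
[root@master1 config]# oc create -f registry-pvc.yaml -n default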
[root@master1 config]# oc get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
registry-pvc Bound registry-pv 150Gi RWO 2d
c. Create the Registry:
[root@master ~]# oc create serviceaccount registry -n default
[root@master ~]# oadm policy add-scc-to-user privileged system:serviceaccount:default:registry
[root@master ~]# oadm registry --service-account=registry
error: serviceaccounts "registry" already exists
error: rolebinding "registry-registry-role" already exists
deploymentconfig "docker-registry" created
service "docker-registry" created
d. Bind the PVC to the registry volume:
[root@master1]# oc volume deploymentconfigs docker-registry --add --name=registry-storage -t pvc --claim-name=registry-pvc --overwrite
info: deploymentconfigs "docker-registry" was not changed
e. Verify the volume was replaced:
[root@master1]# oc get dc docker-registry -o yaml
apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
  creationTimestamp: 2018-05-18T11:00:40Z
  generation: 2
  labels:
    docker-registry: default
  name: docker-registry
  namespace: default
  resourceVersion: "525368"
  selfLink: /apis/apps.openshift.io/v1/namespaces/default/deploymentconfigs/docker-registry
  uid: b3d9532f-5a8a-11e8-a3f1-002590c89d6e
spec:
  replicas: 1
  selector:
    docker-registry: default
  strategy:
    activeDeadlineSeconds: 21600
    resources: {}
    rollingParams:
      intervalSeconds: 1
      maxSurge: 25%
      maxUnavailable: 25%
      timeoutSeconds: 600
      updatePeriodSeconds: 1
    type: Rolling
  template:
    metadata:
      creationTimestamp: null
      labels:
        docker-registry: default
    spec:
      containers:
      - env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_HTTP_NET
          value: tcp
        - name: REGISTRY_HTTP_SECRET
          value: Yg2Fv0n8deKlSNEoyLUaDYBy0zGBktPvK9nTWWS7fM8=
        - name: REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA
          value: "false"
        image: openshift/origin-docker-registry:v3.9.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 5000
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        name: registry
        ports:
        - containerPort: 5000
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 5000
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        resources:
          requests:
            cpu: 100m
            memory: 256Mi
        securityContext:
          privileged: false
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /registry
          name: registry-storage
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: registry
      serviceAccountName: registry
      terminationGracePeriodSeconds: 30
      volumes:
      - name: registry-storage
        persistentVolumeClaim:
          claimName: registry-pvc
  test: false
  triggers:
  - type: ConfigChange
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: 2018-05-18T11:00:50Z
    lastUpdateTime: 2018-05-18T11:00:50Z
    message: Deployment config has minimum availability.
    status: "True"
    type: Available
  - lastTransitionTime: 2018-05-18T11:02:16Z
    lastUpdateTime: 2018-05-18T11:02:18Z
    message: replication controller "docker-registry-2" successfully rolled out
    reason: NewReplicationControllerAvailable
    status: "True"
    type: Progressing
  details:
    causes:
    - type: ConfigChange
    message: config change
  latestVersion: 2
  observedGeneration: 2
  readyReplicas: 1
  replicas: 1
  unavailableReplicas: 0
  updatedReplicas: 1
[root@master1 config]# oc get svc
NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                 AGE
docker-registry   ClusterIP   172.30.78.75   <none>        5000/TCP                34m
kubernetes        ClusterIP   172.30.0.1     <none>        443/TCP,53/UDP,53/TCP   3d
To verify the new registry end to end, tag a local image into it, log in with a token, and push and pull it back:
[root@master1 config]# docker tag docker.io/openshift/origin-web-console:v3.9.0 172.30.78.75:5000/openshift/origin-web-console:v3.9.0
[root@master1 config]# oc whoami -t
l-aRA9HnVLMrubtx4PQYMbBdyAtdu3rrB5HUxv6fx28
[root@master1 config]# docker login 172.30.78.75:5000 -u admin -p l-aRA9HnVLMrubtx4PQYMbBdyAtdu3rrB5HUxv6fx28
Login Succeeded
[root@master1 config]# docker push 172.30.78.75:5000/openshift/origin-web-console:v3.9.0
The push refers to a repository [172.30.78.75:5000/openshift/origin-web-console]
70f4a90fb558: Layer already exists
2b17522a9090: Layer already exists
dab57e0a2da4: Layer already exists
43e653f84b79: Pushed
v3.9.0: digest: sha256:3d209f6914dfc5f7e714f490e4d61b53766f317a0134911d79137e098f57fe26 size: 1160
[root@master1 config]# docker pull 172.30.78.75:5000/openshift/origin-web-console:v3.9.0
Trying to pull repository 172.30.78.75:5000/openshift/origin-web-console ...
v3.9.0: Pulling from 172.30.78.75:5000/openshift/origin-web-console
Digest: sha256:3d209f6914dfc5f7e714f490e4d61b53766f317a0134911d79137e098f57fe26
Status: Image is up to date for 172.30.78.75:5000/openshift/origin-web-console:v3.9.0
f. Grant a regular user permission to push and pull images.
# Grant the 'user' account access to the registry (push and pull images)
[root@master ~]# oadm policy add-role-to-user system:registry user
[root@master ~]# oadm policy add-role-to-user admin user
[root@master ~]# oadm policy add-role-to-user system:image-builder user
[root@master ~]# oadm policy add-cluster-role-to-user cluster-admin user
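As a hypothetical check (not in the original), the granted user can then authenticate to the registry with a token, following the same pattern used for admin above:
[root@master1 ~]# oc login https://10.1.123.202:8443 -u user
[root@master1 ~]# docker login 172.30.78.75:5000 -u user -p $(oc whoami -t)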
2) Add htpasswd authentication to OpenShift (the htpasswd tool is provided by the httpd-tools package):
[root@master1 ~]# htpasswd -c /home/users.htpasswd houjian
New password:
Re-type new password:
Adding password for user houjian
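Note that -c creates (and truncates) the file; to add further users later, omit -c. Standard htpasswd usage, with a hypothetical user name:
[root@master1 ~]# htpasswd /home/users.htpasswd anotheruser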
[root@master1 ~]# vim /etc/origin/master/master-config.yaml
Replace the identity provider section with the following (the numbered markers are explained below):
oauthConfig:
  ...
  identityProviders:
  - name: my_htpasswd_provider (1)
    challenge: true (2)
    login: true (3)
    mappingMethod: claim (4)
    provider:
      apiVersion: v1
      kind: HTPasswdPasswordIdentityProvider
      file: /path/to/users.htpasswd (5)
1 This provider name is prefixed to provider user names to form an identity name.
2 When true, unauthenticated token requests from non-web clients (like the CLI) are sent a WWW-Authenticate challenge header for this provider.
3 When true, unauthenticated token requests from web clients (like the web console) are redirected to a login page backed by this provider.
4 Controls how mappings are established between this provider's identities and user objects (claim is the usual default).
5 File generated using htpasswd (here, /home/users.htpasswd as created above).
[root@master1 master]# systemctl restart origin-master-api
(In this two-master setup, make the same edit and restart on master2 as well.)
3) Create a new Registry console: