1. wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml downloads the RBAC manifest for Canal. kubectl apply -f rbac.yaml declares (creates) the resources.
[root@master flannel]# wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
--2018-12-18 21:15:05-- https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 178.128.123.58, 2400:6180:0:d1::6e:5001
Connecting to docs.projectcalico.org (docs.projectcalico.org)|178.128.123.58|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2423 (2.4K) [application/x-yaml]
Saving to: ‘rbac.yaml’
100%[=============================================================================================>] 2,423 1.70KB/s in 1.4s
2018-12-18 21:15:13 (1.70 KB/s) - ‘rbac.yaml’ saved [2423/2423]
[root@master flannel]# kubectl apply -f rbac.yaml
clusterrole.rbac.authorization.k8s.io/calico created
clusterrole.rbac.authorization.k8s.io/flannel configured
clusterrolebinding.rbac.authorization.k8s.io/canal-flannel created
clusterrolebinding.rbac.authorization.k8s.io/canal-calico created
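For reference, the apply output above indicates that rbac.yaml defines ClusterRoles for calico and flannel plus two ClusterRoleBindings. A minimal sketch of what one of those bindings typically looks like (binding the flannel ClusterRole to the canal ServiceAccount in kube-system; the authoritative content is whatever the downloaded file contains):
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: canal-flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system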
2. wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml downloads the Canal deployment manifest (Calico for network policy, flannel for pod networking). cat canal.yaml | grep -i image shows which container images the manifest uses. kubectl apply -f canal.yaml declares the resources.
[root@master flannel]# wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
--2018-12-18 21:22:59-- https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 206.189.89.118, 2400:6180:0:d1::6e:5001
Connecting to docs.projectcalico.org (docs.projectcalico.org)|206.189.89.118|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10344 (10K) [application/x-yaml]
Saving to: ‘canal.yaml’
100%[=============================================================================================>] 10,344 7.76KB/s in 1.3s
2018-12-18 21:23:03 (7.76 KB/s) - ‘canal.yaml’ saved [10344/10344]
[root@master flannel]# cat canal.yaml | grep -i image
image: quay.io/calico/node:v3.1.4
image: quay.io/calico/cni:v3.1.4
image: quay.io/coreos/flannel:v0.9.1
[root@master flannel]# kubectl apply -f canal.yaml
configmap/canal-config created
daemonset.extensions/canal created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
serviceaccount/canal created
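If the nodes pull from quay.io slowly, the three images listed above can be pre-pulled on every node before applying the manifest (an optional convenience step, not part of the original walkthrough):
docker pull quay.io/calico/node:v3.1.4
docker pull quay.io/calico/cni:v3.1.4
docker pull quay.io/coreos/flannel:v0.9.1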
3. kubectl get pods -n kube-system -o wide | grep canal shows the canal pods started in the kube-system namespace (one per node). mkdir networkpolicy prepares a directory for the test files.
[root@master flannel]# kubectl get pods -n kube-system -o wide | grep canal
canal-jk99m 3/3 Running 0 1m 172.20.0.129 node1.example.com
canal-khlvq 3/3 Running 0 1m 172.20.0.128 master.example.com
canal-vqvnt 3/3 Running 0 1m 172.20.0.130 node2.example.com
[root@master flannel]# mkdir networkpolicy
[root@master flannel]# cd networkpolicy
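Canal runs as a DaemonSet, so one pod per node is expected. An optional sanity check is to confirm the DaemonSet reports all pods ready:
kubectl get daemonset canal -n kube-system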
4. vim ingress-def.yaml creates a NetworkPolicy resource file; cat ingress-def.yaml shows its contents. Note namespace: dev, which scopes the policy to that namespace, and that policyTypes lists Ingress while no ingress rules are defined, so no inbound traffic can reach the selected pods.
[root@master networkpolicy]# vim ingress-def.yaml
[root@master networkpolicy]# cat ingress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
  namespace: dev
spec:
  podSelector: {}
  policyTypes:
  - Ingress
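The empty podSelector: {} selects every pod in the namespace, and listing Ingress in policyTypes without any ingress rules denies all inbound traffic to those pods. For comparison, a default deny in both directions would simply list both policy types (a sketch, not used in this walkthrough):
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress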
5. kubectl create namespace dev and kubectl create namespace prod create the two namespaces. kubectl apply -f ingress-def.yaml -n dev applies the policy in the dev namespace. kubectl get netpol -n dev lists the network policies in that namespace.
[root@master networkpolicy]# kubectl create namespace dev
namespace/dev created
[root@master networkpolicy]# kubectl create namespace prod
namespace/prod created
[root@master networkpolicy]# kubectl apply -f ingress-def.yaml -n dev
networkpolicy.networking.k8s.io/deny-all-ingress created
[root@master networkpolicy]# kubectl get netpol -n dev
NAME POD-SELECTOR AGE
deny-all-ingress
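To see how the policy is being interpreted (selected pods, allowed sources), it can also be described:
kubectl describe networkpolicy deny-all-ingress -n dev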
6. vim pod-a.yaml edits the Pod resource file; cat pod-a.yaml shows it. kubectl apply -f pod-a.yaml -n dev creates the Pod in the dev namespace. kubectl get pods -n dev -o wide shows its IP. curl 10.244.2.2 cannot reach the pod, because the policy defines no ingress rules and therefore denies all inbound traffic.
[root@master networkpolicy]# vim pod-a.yaml
[root@master networkpolicy]# cat pod-a.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
[root@master networkpolicy]# kubectl apply -f pod-a.yaml -n dev
pod/pod1 created
[root@master networkpolicy]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE
pod1 1/1 Running 0 19s 10.244.2.2 node2.example.com
[root@master networkpolicy]# curl 10.244.2.2
^C
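The curl above hangs because traffic denied by a NetworkPolicy is silently dropped rather than rejected. When testing policies it helps to bound the wait, for example:
curl --connect-timeout 3 10.244.2.2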
7. kubectl apply -f pod-a.yaml -n prod creates the same Pod in the prod namespace. kubectl get pods -n prod -o wide shows its IP. curl 10.244.1.2 reaches the pod, because no NetworkPolicy exists in prod and the default behavior allows all traffic.
[root@master networkpolicy]# kubectl apply -f pod-a.yaml -n prod
pod/pod1 created
[root@master networkpolicy]# kubectl get pods -n prod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
pod1 1/1 Running 0 10s 10.244.1.2 node1.example.com
[root@master networkpolicy]# curl 10.244.1.2
Hello MyApp | Version: v1 | Pod Name
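The prod pod is reachable because no NetworkPolicy selects it yet, which can be confirmed with:
kubectl get netpol -n prod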
8. vim ingress-def.yaml edits the file; cat ingress-def.yaml shows it. The ingress: field now holds a single empty rule, - {}, which matches all inbound traffic, so external access is allowed again. kubectl apply -f ingress-def.yaml -n dev re-applies the policy. curl 10.244.1.2 still works, and curl 10.244.2.2 now works as well.
[root@master networkpolicy]# vim ingress-def.yaml
[root@master networkpolicy]# cat ingress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
spec:
  podSelector: {}
  ingress:
  - {}
  policyTypes:
  - Ingress
[root@master networkpolicy]# kubectl apply -f ingress-def.yaml -n dev
networkpolicy.networking.k8s.io/deny-all-ingress configured
[root@master networkpolicy]# curl 10.244.1.2
Hello MyApp | Version: v1 | Pod Name
[root@master networkpolicy]# curl 10.244.2.2
Hello MyApp | Version: v1 | Pod Name
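Note the distinction the two versions of ingress-def.yaml illustrate: with policyTypes listing Ingress, an absent or empty rule list denies everything, while a list containing one empty rule matches every source and port and therefore allows everything.
# no rules: all inbound traffic to the selected pods is denied
ingress: []
# one empty rule: all inbound traffic is allowed
ingress:
- {}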
9. vim allow-net-demo.yaml creates the file; cat allow-net-demo.yaml shows it (note the ingress: rule, which allows TCP port 80 from 10.244.0.0/16 with 10.244.1.2/32 excluded). kubectl apply -f allow-net-demo.yaml -n dev applies it. curl 10.244.2.2 reaches port 80, while curl 10.244.2.2:443 hangs because port 443 is not in the allowed port list.
[root@master networkpolicy]# vim allow-net-demo.yaml
[root@master networkpolicy]# cat allow-net-demo.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-myapp-ingress
spec:
  podSelector:
    matchLabels:
      app: myapp
  ingress:
  - from:
    - ipBlock:
        cidr: 10.244.0.0/16
        except:
        - 10.244.1.2/32
    ports:
    - protocol: TCP
      port: 80
[root@master networkpolicy]# kubectl apply -f allow-net-demo.yaml -n dev
networkpolicy.networking.k8s.io/allow-myapp-ingress created
[root@master networkpolicy]# kubectl get netpol -n dev
NAME POD-SELECTOR AGE
allow-myapp-ingress app=myapp 8s
deny-all-ingress
[root@master networkpolicy]# curl 10.244.2.2
Hello MyApp | Version: v1 | Pod Name
[root@master networkpolicy]# curl 10.244.2.2:443
^C
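For these results to hold, pod1 in dev must carry the app=myapp label so that allow-myapp-ingress selects it, and deny-all-ingress must be back in its deny-all form from step 4 (otherwise its allow-everything rule from step 8 would still admit port 443). Neither step appears in the transcript; labeling the pod would look like:
kubectl label pods pod1 app=myapp -n dev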
10. vim allow-net-demo.yaml modifies the file; cat allow-net-demo.yaml shows that port 443 is now allowed as well. After re-applying it with kubectl apply -f allow-net-demo.yaml -n dev (not shown in the transcript), curl 10.244.2.2 still reaches port 80; curl 10.244.2.2:443 now passes the policy but is refused by the pod itself, since nothing listens on 443; and curl 10.244.2.2:6443 is still blocked because that port is not in the allowed list.
[root@master networkpolicy]# vim allow-net-demo.yaml
[root@master networkpolicy]# cat allow-net-demo.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-myapp-ingress
spec:
  podSelector:
    matchLabels:
      app: myapp
  ingress:
  - from:
    - ipBlock:
        cidr: 10.244.0.0/16
        except:
        - 10.244.1.2/32
    ports:
    - protocol: TCP
      port: 80
    - protocol: TCP
      port: 443
[root@master networkpolicy]# curl 10.244.2.2
Hello MyApp | Version: v1 | Pod Name
[root@master networkpolicy]# curl 10.244.2.2:443
curl: (7) Failed connect to 10.244.2.2:443; Connection refused
[root@master networkpolicy]# curl 10.244.2.2:6443
^C
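A further check, not shown in the transcript, would exercise the except clause: 10.244.1.2/32 (the prod pod) is excluded from the allowed CIDR, so a request from that pod should be dropped even on port 80, provided no other policy in dev admits it. Assuming the myapp image ships the busybox wget, the test would look like:
kubectl exec pod1 -n prod -- wget -T 3 -O - http://10.244.2.2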
11. cp ingress-def.yaml egree-def.yaml and vim egree-def.yaml create an egress policy file; cat egree-def.yaml shows it. policyTypes lists only Egress and no egress rules are defined, so all outbound traffic from the selected pods is denied (the ingress: - {} stanza left over from the copied file is ignored because Ingress is not listed in policyTypes). kubectl apply -f egree-def.yaml -n prod applies it. kubectl get pods -n kube-system -o wide | grep -i dns finds a target IP outside the pod (a CoreDNS pod). kubectl exec pod1 -it -n prod -- /bin/sh opens a shell in pod1, and ping 10.244.2.119 gets no replies because outbound traffic is blocked.
[root@master networkpolicy]# cp ingress-def.yaml egree-def.yaml
[root@master networkpolicy]# vim egree-def.yaml
[root@master networkpolicy]# cat egree-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
spec:
  podSelector: {}
  ingress:
  - {}
  policyTypes:
  - Egress
[root@master networkpolicy]# kubectl apply -f egree-def.yaml -n prod
networkpolicy.networking.k8s.io/deny-all-egress created
[root@master networkpolicy]# kubectl get pods -n kube-system -o wide | grep -i dns
coredns-78fcdf6894-p2rb6 1/1 Running 7 5d 10.244.2.119 node2.example.com
coredns-78fcdf6894-pcb99 1/1 Running 6 5d 10.244.2.118 node2.example.com
[root@master networkpolicy]# kubectl exec pod1 -it -n prod -- /bin/sh
/ # ping 10.244.2.119
PING 10.244.2.119 (10.244.2.119): 56 data bytes
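Because Ingress is not listed in policyTypes, the ingress: - {} stanza left over from the copied file is ignored. A cleaner version of the same deny-all-egress policy would simply omit it (sketch):
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
spec:
  podSelector: {}
  policyTypes:
  - Egress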
12. vim egree-def.yaml edits the file; cat egree-def.yaml shows that egress: now contains a single empty rule, - {}, which allows all outbound traffic. kubectl apply -f egree-def.yaml -n prod re-applies it, and ping 10.244.2.119 from inside the pod now receives replies.
[root@master networkpolicy]# vim egree-def.yaml
[root@master networkpolicy]# cat egree-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
spec:
  podSelector: {}
  egress:
  - {}
  policyTypes:
  - Egress
[root@master networkpolicy]# kubectl apply -f egree-def.yaml -n prod
networkpolicy.networking.k8s.io/deny-all-egress configured
/ # ping 10.244.2.119
PING 10.244.2.119 (10.244.2.119): 56 data bytes
64 bytes from 10.244.2.119: seq=0 ttl=62 time=1.033 ms
64 bytes from 10.244.2.119: seq=1 ttl=62 time=0.617 ms
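In practice a blanket egress allow is rarely the end state; a common refinement is to permit only what the pods need, such as DNS lookups. A sketch of that pattern (port 53 is the Kubernetes DNS default, not something configured in this walkthrough):
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns-egress
spec:
  podSelector: {}
  egress:
  - ports:
    - protocol: UDP
      port: 53
    - protocol: TCP
      port: 53
  policyTypes:
  - Egress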