Canal 是一个项目的名称,它试图将Flannel提供的网络层与Calico的网络策略功能集成在一起。然而,当贡献者完成细节工作时发现:如果Flannel和Calico这两个项目各自都已确保了标准化和灵活性,那么集成也就没那么大必要了。结果,这个官方项目变得有些“烂尾”了,不过却实现了将两种技术部署在一起的预期能力。出于这个原因,即使这个项目不复存在,业界还是会习惯性地将Flannel和Calico的组合称为“Canal”。
由于Canal是Flannel和Calico的组合,因此它的优点也在于这两种技术的交叉。网络层用的是Flannel提供的简单overlay,可以在许多不同的部署环境中运行且无需额外的配置。在网络策略方面,Calico强大的网络规则评估,为基础网络提供了更多补充,从而提供了更多的安全性和控制,类似于我们系统上的防火墙。
[root@k8smaster ~]# mkdir /data/canal
[root@k8smaster ~]# cd /data/canal/
[root@k8smaster canal]# wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
--2020-01-07 10:56:42-- https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 2400:6180:0:d1::684:6001, 134.209.106.40
Connecting to docs.projectcalico.org (docs.projectcalico.org)|2400:6180:0:d1::684:6001|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2423 (2.4K) [application/x-yaml]
Saving to: ‘rbac.yaml’
100%[=====================================================================================================================>] 2,423 5.09KB/s in 0.5s
2020-01-07 10:57:08 (5.09 KB/s) - ‘rbac.yaml’ saved [2423/2423]
[root@k8smaster canal]# wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
--2020-01-07 10:57:14-- https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 2400:6180:0:d1::575:a001, 178.128.17.49
Connecting to docs.projectcalico.org (docs.projectcalico.org)|2400:6180:0:d1::575:a001|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10344 (10K) [application/x-yaml]
Saving to: ‘canal.yaml’
100%[=====================================================================================================================>] 10,344 3.19KB/s in 3.2s
2020-01-07 10:57:19 (3.19 KB/s) - ‘canal.yaml’ saved [10344/10344]
[root@k8smaster canal]# kubectl apply -f rbac.yaml
clusterrole.rbac.authorization.k8s.io/calico unchanged
clusterrole.rbac.authorization.k8s.io/flannel unchanged
clusterrolebinding.rbac.authorization.k8s.io/canal-flannel unchanged
clusterrolebinding.rbac.authorization.k8s.io/canal-calico unchanged
[root@k8smaster canal]# kubectl apply -f canal.yaml
configmap/canal-config unchanged
daemonset.extensions/canal configured
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org unchanged
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org unchanged
serviceaccount/canal unchanged
[root@k8smaster canal]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
canal-gf42f 3/3 Running 0 65m
canal-kzxv8 3/3 Running 0 65m
canal-wrqvn 3/3 Running 0 65m
coredns-bf7759867-8h4x8 1/1 Running 9 70d
coredns-bf7759867-slmsz 1/1 Running 9 70d
etcd-k8smaster 1/1 Running 24 70d
kube-apiserver-k8smaster 1/1 Running 66 68d
kube-controller-manager-k8smaster 1/1 Running 20 70d
kube-flannel-ds-amd64-6zhtw 1/1 Running 18 70d
kube-flannel-ds-amd64-wnh9k 1/1 Running 9 70d
kube-flannel-ds-amd64-wqvz9 1/1 Running 18 70d
kube-proxy-2j8w9 1/1 Running 18 70d
kube-proxy-kqxlq 1/1 Running 17 70d
kube-proxy-nb82z 1/1 Running 9 70d
kube-scheduler-k8smaster 1/1 Running 20 70d
kubernetes-dashboard-7d75c474bb-hggrb 1/1 Running 2 4d19h
traefik-ingress-controller-8wbtb 0/1 CrashLoopBackOff 509 21d
traefik-ingress-controller-bmpbk 0/1 CrashLoopBackOff 509 21d
#拒绝所有入站
[root@k8smaster canal]# kubectl create namespace dev
namespace/dev created
[root@k8smaster canal]# kubectl create namespace prod
namespace/prod created
[root@k8smaster canal]# vim ingress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
spec:
  podSelector: {}
  policyTypes:
  - Ingress
[root@k8smaster canal]# kubectl apply -f ingress-def.yaml -n dev
networkpolicy.networking.k8s.io/deny-all-ingress created
[root@k8smaster ~]# kubectl get netpol -n dev
NAME POD-SELECTOR AGE
deny-all-ingress 19m
[root@k8smaster ~]# vim pod-a.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
[root@k8smaster ~]# kubectl apply -f pod-a.yaml -n dev
pod/pod1 created
[root@k8smaster ~]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 11s 10.244.2.2 k8snode2
#访问不到这个Pod
[root@k8smaster ~]# curl 10.244.2.2
[root@k8smaster ~]# kubectl apply -f pod-a.yaml -n prod
pod/pod1 created
[root@k8smaster ~]# kubectl get pods -n prod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 32s 10.244.1.3 k8snode1
#可以访问
[root@k8smaster ~]# curl 10.244.1.3
Hello MyApp | Version: v1 | Pod Name
#允许所有入站
[root@k8smaster canal]# vim ingress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
spec:
  podSelector: {}
  ingress:
  - {}
  policyTypes:
  - Ingress
[root@k8smaster canal]# kubectl apply -f ingress-def.yaml -n dev
networkpolicy.networking.k8s.io/deny-all-ingress configured
[root@k8smaster ~]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 45s 10.244.2.4 k8snode2
[root@k8smaster ~]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 45s 10.244.2.4 k8snode2
#可以访问到
[root@k8smaster ~]# curl 10.244.2.4
Hello MyApp | Version: v1 | Pod Name
#放行特定的入站流量
[root@k8smaster canal]# kubectl label pods pod1 app=myapp -n dev
pod/pod1 labeled
[root@k8smaster canal]# vim allow-netpol.yaml
#允许标签app=myapp的Pod的80端口被10.244.0.0/16网段访问,但通过except排除了10.244.1.3/32,即该地址不允许访问。
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-myapp-ingress
spec:
  podSelector:
    matchLabels:
      app: myapp
  ingress:
  - from:
    - ipBlock:
        cidr: 10.244.0.0/16
        except:
        - 10.244.1.3/32
    ports:
    - protocol: TCP
      port: 80
[root@k8smaster canal]# kubectl apply -f allow-netpol.yaml -n dev
networkpolicy.networking.k8s.io/allow-myapp-ingress created
[root@k8smaster canal]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 47m 10.244.2.4 k8snode2
#可以访问到
[root@k8smaster canal]# curl 10.244.2.4
Hello MyApp | Version: v1 | Pod Name
#拒绝所有出站流量
[root@k8smaster canal]# vim egress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
spec:
  podSelector: {}
  policyTypes:
  - Egress
[root@k8smaster canal]# kubectl apply -f egress-def.yaml -n prod
networkpolicy.networking.k8s.io/deny-all-egress created
[root@k8smaster ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
canal-gf42f 3/3 Running 38 6h17m 192.168.43.136 k8snode1
canal-kzxv8 3/3 Running 40 6h17m 192.168.43.45 k8smaster
canal-wrqvn 3/3 Running 43 6h17m 192.168.43.176 k8snode2
coredns-bf7759867-8h4x8 1/1 Running 10 70d 10.244.0.3 k8smaster
coredns-bf7759867-slmsz 1/1 Running 10 70d 10.244.0.2 k8smaster
etcd-k8smaster 1/1 Running 25 70d 192.168.43.45 k8smaster
kube-apiserver-k8smaster 1/1 Running 100 69d 192.168.43.45 k8smaster
kube-controller-manager-k8smaster 1/1 Running 22 70d 192.168.43.45 k8smaster
kube-flannel-ds-amd64-6zhtw 1/1 Running 18 70d 192.168.43.136 k8snode1
kube-flannel-ds-amd64-wnh9k 1/1 Running 10 70d 192.168.43.45 k8smaster
kube-flannel-ds-amd64-wqvz9 1/1 Running 19 70d 192.168.43.176 k8snode2
kube-proxy-2j8w9 1/1 Running 19 70d 192.168.43.176 k8snode2
kube-proxy-kqxlq 1/1 Running 17 70d 192.168.43.136 k8snode1
kube-proxy-nb82z 1/1 Running 10 70d 192.168.43.45 k8smaster
kube-scheduler-k8smaster 1/1 Running 22 70d 192.168.43.45 k8smaster
kubernetes-dashboard-7d75c474bb-8cdhz 1/1 Running 0 3h11m 10.244.1.4 k8snode1
traefik-ingress-controller-8wbtb 0/1 CrashLoopBackOff 539 21d 10.244.2.3 k8snode2
traefik-ingress-controller-bmpbk 0/1 CrashLoopBackOff 536 21d 10.244.1.149 k8snode1
#测试访问coredns-bf7759867-8h4x8的ip地址。
#由于拒绝了所有出站流量,所以访问不到。
[root@k8smaster canal]# kubectl exec -it pod1 -n prod -- /bin/sh
/ # ping 10.244.0.3
PING 10.244.0.3 (10.244.0.3): 56 data bytes
^C
--- 10.244.0.3 ping statistics ---
4 packets transmitted, 0 packets received, 100% packet loss
/ # exit
command terminated with exit code 1
#允许所有出站流量。
[root@k8smaster canal]# vim egress-def.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
spec:
  podSelector: {}
  egress:
  - {}
  policyTypes:
  - Egress
[root@k8smaster canal]# kubectl apply -f egress-def.yaml -n prod
networkpolicy.networking.k8s.io/deny-all-egress configured
[root@k8smaster canal]# kubectl exec -it pod1 -n prod -- /bin/sh
#由于允许了所有出站规则,所以又能访问到coredns-bf7759867-8h4x8的ip地址。
/ # ping 10.244.0.3
PING 10.244.0.3 (10.244.0.3): 56 data bytes
64 bytes from 10.244.0.3: seq=0 ttl=62 time=0.928 ms
64 bytes from 10.244.0.3: seq=1 ttl=62 time=0.639 ms
64 bytes from 10.244.0.3: seq=2 ttl=62 time=0.794 ms
^C
--- 10.244.0.3 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.639/0.787/0.928 ms