Going by the description of Service types in *The Definitive Guide to Kubernetes* (《Kubernetes权威指南》), a ClusterIP is a purely virtual address, so you would not expect it to answer ping. Yet in my environment it does:
```
[root@k8s-master test-namespace]# kubectl get svc -n test
NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
nginx-app-svc   ClusterIP   10.96.240.115   <none>        8090/TCP   14d
[root@k8s-master test-namespace]# ping 10.96.240.115
PING 10.96.240.115 (10.96.240.115) 56(84) bytes of data.
64 bytes from 10.96.240.115: icmp_seq=1 ttl=64 time=0.045 ms
64 bytes from 10.96.240.115: icmp_seq=2 ttl=64 time=0.035 ms
^C
--- 10.96.240.115 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.035/0.040/0.045/0.005 ms
```
After digging through the documentation, the answer turned out to hinge on kube-proxy's proxy mode. Here is how the Kubernetes documentation describes kube-proxy:
kube-proxy is a network proxy that runs on each node in the cluster, implementing part of the Kubernetes Service concept. kube-proxy maintains network rules on nodes; these rules allow network communication to your Pods from network sessions inside or outside of the cluster. kube-proxy uses the operating system's packet filtering layer if there is one and it is available; otherwise, kube-proxy forwards the traffic itself.
kube-proxy offers several proxy modes, and the two relevant here treat ICMP very differently:

- **iptables**: the ClusterIP exists only as iptables rules, which match layer-4 ip:port packets; ICMP is not handled, so the ClusterIP cannot be pinged.
- **IPVS**: the ClusterIP is bound to the virtual interface kube-ipvs0, and a local route via the loopback interface is installed for it, so ICMP echo requests are answered by the lo interface; the ClusterIP can be pinged.

A quick way to confirm which mode a cluster is actually running is shown right after this list.
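On a kubeadm-based cluster, kube-proxy reports its active mode over its metrics endpoint. A minimal check, assuming the default metrics bind address of 127.0.0.1:10249:

```
# Ask the local kube-proxy which proxy mode it is actually using
curl http://localhost:10249/proxyMode
# prints "iptables" or "ipvs"
```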
That explains why the ClusterIP in my environment can be pinged. And indeed, the kube-ipvs0 virtual interface, with the Services' virtual IPs bound to it, is present in the test environment:
```
[root@k8s-master test-namespace]# ifconfig kube-ipvs0
kube-ipvs0: flags=130<BROADCAST,NOARP>  mtu 1500
        inet 10.106.125.71  netmask 255.255.255.255  broadcast 0.0.0.0
        ether e2:09:df:13:ae:7e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
[root@k8s-master test-namespace]# ip add
11: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
    link/ether e2:09:df:13:ae:7e brd ff:ff:ff:ff:ff:ff
    inet 10.106.125.71/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.110.143.22/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.97.55.252/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.104.10.78/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.99.137.44/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.108.95.38/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.111.108.30/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.108.242.176/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.96.0.10/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.108.233.252/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.111.66.62/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.97.43.250/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.103.174.59/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.96.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.96.240.115/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.98.130.231/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.104.208.98/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
```
Another observation: the number of IP addresses bound to kube-ipvs0 equals the number of Services in the cluster (excluding headless Services, whose CLUSTER-IP shows as None):
```
[root@k8s-master test-namespace]# ip add | grep -c 'inet 10'
18
[root@k8s-master test-namespace]# kubectl get svc -A | grep -v None |wc -l
18
```
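Both counting commands above are a little loose: the first counts inet 10.x addresses on every interface, and the second counts kubectl's header line as well. A tighter variant, assuming the same cluster layout:

```
# Count only the addresses bound to kube-ipvs0
ip -4 addr show dev kube-ipvs0 | grep -c 'inet '
# Count only Services with a real ClusterIP (4th column of -A output), no header
kubectl get svc -A --no-headers | awk '$4 != "None"' | wc -l
```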
My environment runs kube-proxy in IPVS mode; I switched it over when the cluster was first created. The change is made in the kube-proxy ConfigMap:
```
[root@k8s-master test-namespace]# kubectl get cm -n kube-system
NAME                                 DATA   AGE
calico-config                        4      22d
coredns                              1      22d
extension-apiserver-authentication   6      22d
kube-proxy                           2      22d
kube-root-ca.crt                     1      22d
kubeadm-config                       2      22d
kubelet-config-1.20                  1      22d
[root@k8s-master test-namespace]# kubectl edit cm kube-proxy -n kube-system
```
```
...
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 0s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: "ipvs"    # empty ("") by default; set it to "ipvs"
...
```
**After editing the kube-proxy ConfigMap, the kube-proxy pods must be restarted to pick up the change; deleting them one by one is enough, since the DaemonSet recreates them.**
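In a kubeadm cluster the kube-proxy pods are managed by a DaemonSet and carry the label k8s-app=kube-proxy, so the restart can also be done in one command (label as used by standard kubeadm deployments):

```
# Delete every kube-proxy pod; the DaemonSet recreates them with the new ConfigMap
kubectl delete pod -n kube-system -l k8s-app=kube-proxy
```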
Check whether the IPVS kernel modules are loaded:
```
[root@iZ8vb05skym8uiglcfnnslZ ~]# lsmod | grep ip_vs
ip_vs_sh               12688  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          139264  10 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_nat_masquerade_ipv6,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  3 ip_vs,nf_nat,nf_conntrack
```
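If lsmod shows nothing, the modules can be loaded manually before switching the mode. A sketch using the standard IPVS module names (on older kernels such as CentOS 7's, nf_conntrack_ipv4 may be needed instead of nf_conntrack):

```
# Load the kernel modules kube-proxy's ipvs mode depends on
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
```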
Finally, install ipvsadm to inspect and manage the IPVS rules. Filtering on the ClusterIP shows the virtual server and the two Pod endpoints behind it (the numeric columns are Conns, InPkts, OutPkts, InBytes, OutBytes; grep strips the header row):
```
[root@k8s-master test-namespace]# yum -y install ipvsadm
[root@k8s-master test-namespace]# ipvsadm -L -n --stats | grep -A 2 10.96.240.115
TCP  10.96.240.115:8090                  2        8        4      432      224
  -> 10.244.36.79:80                     1        4        2      216      112
  -> 10.244.36.80:80                     1        4        2      216      112
```
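ipvsadm can also list a single virtual server directly instead of grepping, by passing the TCP service address with -t (per the ipvsadm man page):

```
# List just this virtual server and its real servers, numeric output
ipvsadm -L -n -t 10.96.240.115:8090
```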
Reference: https://segmentfault.com/a/1190000039349716