Cause: in iptables mode the service IPs are not bound to any network device, so nothing answers on them directly; kube-proxy needs to run with --proxy-mode=ipvs.
[root@kubernetes bak4]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6694fb884c-mgn79 1/1 Running 0 146m
coredns-6694fb884c-ncqh6 1/1 Running 0 146m
etcd-kubernetes 1/1 Running 11 49d
kube-apiserver-kubernetes 1/1 Running 10 49d
kube-controller-manager-kubernetes 1/1 Running 6 49d
kube-flannel-ds-amd64-5cv9n 1/1 Running 6 49d
kube-flannel-ds-amd64-6tzvm 1/1 Running 5 49d
kube-flannel-ds-amd64-827f9 1/1 Running 6 49d
kube-proxy-7ndzn 1/1 Running 6 49d
kube-proxy-ft6wc 1/1 Running 5 49d
kube-proxy-nvc4l 1/1 Running 6 49d
kube-scheduler-kubernetes 1/1 Running 6 49d
The kube-proxy logs show errors:
[root@kubernetes bak4]# kubectl logs -n kube-system kube-proxy-7ndzn
W1110 09:13:34.247156 1 proxier.go:493] Failed to load kernel module ip_vs with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.248189 1 proxier.go:493] Failed to load kernel module ip_vs_rr with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.250441 1 proxier.go:493] Failed to load kernel module ip_vs_wrr with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.251811 1 proxier.go:493] Failed to load kernel module ip_vs_sh with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.256361 1 server_others.go:295] Flag proxy-mode="" unknown, assuming iptables proxy
I1110 09:13:34.264897 1 server_others.go:148] Using iptables Proxier.
I1110 09:13:34.265123 1 server_others.go:178] Tearing down inactive rules.
I1110 09:13:34.282035 1 server.go:464] Version: v1.13.3
I1110 09:13:34.288611 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_max' to 131072
I1110 09:13:34.288637 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 09:13:34.290663 1 conntrack.go:83] Setting conntrack hashsize to 32768
I1110 09:13:34.290831 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
I1110 09:13:34.290892 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
I1110 09:13:34.291011 1 config.go:102] Starting endpoints config controller
I1110 09:13:34.291023 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 09:13:34.291040 1 config.go:202] Starting service config controller
I1110 09:13:34.291044 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 09:13:34.391530 1 controller_utils.go:1034] Caches are synced for service config controller
I1110 09:13:34.391624 1 controller_utils.go:1034] Caches are synced for endpoints config controller
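The log confirms the fallback to the iptables proxier. kube-proxy also reports its active mode over the metrics port; a quick sketch, assuming the default metricsBindAddress of 127.0.0.1:10249 shown in the ConfigMap below:

curl -s 127.0.0.1:10249/proxyMode    # run on the node; prints the active mode, "iptables" before the fix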
[root@kubernetes bak4]# kubectl edit cm kube-proxy -n kube-system
The modified section:
    ipvs:
      excludeCIDRs: null
      minSyncPeriod: 0s
      scheduler: ""
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
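The same edit can be scripted instead of done interactively; a minimal sketch, assuming the field currently reads mode: "":

kubectl -n kube-system get cm kube-proxy -o yaml \
  | sed 's/mode: ""/mode: "ipvs"/' \
  | kubectl apply -f -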
Every node also needs the IPVS kernel modules loaded:
[root@kubernetes bak4]# cat /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
[root@kubernetes bak4]# chmod 755 /etc/sysconfig/modules/ipvs.modules
[root@kubernetes bak4]# bash /etc/sysconfig/modules/ipvs.modules
[root@kubernetes bak4]# lsmod |grep -e ip_vs -e nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 141092 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack_ipv4 15053 6
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
nf_conntrack 133387 9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
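To make sure the modules are also loaded after a reboot, systemd-based distributions read /etc/modules-load.d/*.conf at boot; a minimal sketch (note that on kernels 4.19 and later, nf_conntrack_ipv4 is merged into nf_conntrack):

cat <<'EOF' > /etc/modules-load.d/ipvs.conf
# one module name per line, loaded by systemd-modules-load at boot
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF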
Restart kube-proxy by deleting the pods; the DaemonSet recreates them with the new configuration:
[root@kubernetes bak4]# kubectl get pods -n kube-system |grep kube-proxy
kube-proxy-7ndzn 1/1 Running 6 49d
kube-proxy-ft6wc 1/1 Running 5 49d
kube-proxy-nvc4l 1/1 Running 6 49d
[root@kubernetes bak4]# kubectl get pods -n kube-system |grep kube-proxy|awk '{print $1}'| xargs kubectl delete pod -n kube-system
pod "kube-proxy-7ndzn" deleted
pod "kube-proxy-ft6wc" deleted
pod "kube-proxy-nvc4l" deleted
No more errors — kube-proxy now starts with the IPVS proxier (one node logs a one-off iptables-restore failure while tearing down the old iptables rules):
[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-h6kwp
I1110 15:30:58.565092 1 server_others.go:189] Using ipvs Proxier.
W1110 15:30:58.565309 1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:30:58.565420 1 server_others.go:216] Tearing down inactive rules.
I1110 15:30:58.603102 1 server.go:464] Version: v1.13.3
I1110 15:30:58.608057 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:30:58.608802 1 config.go:202] Starting service config controller
I1110 15:30:58.608813 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:30:58.608910 1 config.go:102] Starting endpoints config controller
I1110 15:30:58.608915 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:30:58.709075 1 controller_utils.go:1034] Caches are synced for endpoints config controller
I1110 15:30:58.709124 1 controller_utils.go:1034] Caches are synced for service config controller
[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-kbxcr
I1110 15:30:55.564636 1 server_others.go:189] Using ipvs Proxier.
W1110 15:30:55.564845 1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:30:55.565141 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.1:443/TCP/192.168.73.133:6443
I1110 15:30:55.565179 1 graceful_termination.go:174] Deleting rs: 10.96.0.1:443/TCP/192.168.73.133:6443
I1110 15:30:55.565208 1 graceful_termination.go:160] Trying to delete rs: 192.168.73.172:31247/TCP/10.244.1.30:80
I1110 15:30:55.565220 1 graceful_termination.go:174] Deleting rs: 192.168.73.172:31247/TCP/10.244.1.30:80
I1110 15:30:55.565247 1 graceful_termination.go:160] Trying to delete rs: 192.168.73.168:31247/TCP/10.244.1.30:80
I1110 15:30:55.565259 1 graceful_termination.go:174] Deleting rs: 192.168.73.168:31247/TCP/10.244.1.30:80
I1110 15:30:55.565277 1 graceful_termination.go:160] Trying to delete rs: 192.168.73.133:31247/TCP/10.244.1.30:80
I1110 15:30:55.565288 1 graceful_termination.go:174] Deleting rs: 192.168.73.133:31247/TCP/10.244.1.30:80
I1110 15:30:55.565310 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:9153/TCP/10.244.2.22:9153
I1110 15:30:55.565325 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:9153/TCP/10.244.2.22:9153
I1110 15:30:55.565335 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:9153/TCP/10.244.1.24:9153
I1110 15:30:55.565346 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:9153/TCP/10.244.1.24:9153
I1110 15:30:55.565364 1 graceful_termination.go:160] Trying to delete rs: 192.168.73.101:31247/TCP/10.244.1.30:80
I1110 15:30:55.565376 1 graceful_termination.go:174] Deleting rs: 192.168.73.101:31247/TCP/10.244.1.30:80
I1110 15:30:55.565394 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/TCP/10.244.2.22:53
I1110 15:30:55.565428 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/TCP/10.244.2.22:53
I1110 15:30:55.565441 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/TCP/10.244.1.24:53
I1110 15:30:55.565455 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/TCP/10.244.1.24:53
I1110 15:30:55.565474 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/UDP/10.244.2.22:53
I1110 15:30:55.565487 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/UDP/10.244.2.22:53
I1110 15:30:55.565497 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/UDP/10.244.1.24:53
I1110 15:30:55.565509 1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/UDP/10.244.1.24:53
I1110 15:30:55.565558 1 graceful_termination.go:160] Trying to delete rs: 10.97.64.43:80/TCP/10.244.1.30:80
I1110 15:30:55.565592 1 graceful_termination.go:174] Deleting rs: 10.97.64.43:80/TCP/10.244.1.30:80
I1110 15:30:55.565616 1 graceful_termination.go:160] Trying to delete rs: 10.244.0.0:31247/TCP/10.244.1.30:80
I1110 15:30:55.565629 1 graceful_termination.go:174] Deleting rs: 10.244.0.0:31247/TCP/10.244.1.30:80
I1110 15:30:55.565648 1 graceful_termination.go:160] Trying to delete rs: 127.0.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565664 1 graceful_termination.go:174] Deleting rs: 127.0.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565682 1 graceful_termination.go:160] Trying to delete rs: 172.17.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565693 1 graceful_termination.go:174] Deleting rs: 172.17.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565713 1 graceful_termination.go:160] Trying to delete rs: 10.96.0.3:10051/TCP/10.244.2.26:10051
I1110 15:30:55.565726 1 graceful_termination.go:174] Deleting rs: 10.96.0.3:10051/TCP/10.244.2.26:10051
I1110 15:30:55.565750 1 server_others.go:216] Tearing down inactive rules.
E1110 15:30:55.594545 1 proxier.go:432] Failed to execute iptables-restore for nat: exit status 1 (iptables-restore: line 7 failed
)
I1110 15:30:55.597338 1 server.go:464] Version: v1.13.3
I1110 15:30:55.602835 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:30:55.605060 1 config.go:102] Starting endpoints config controller
I1110 15:30:55.605073 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:30:55.605293 1 config.go:202] Starting service config controller
I1110 15:30:55.605300 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:30:55.705688 1 controller_utils.go:1034] Caches are synced for service config controller
I1110 15:30:55.705688 1 controller_utils.go:1034] Caches are synced for endpoints config controller
[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-s86dr
I1110 15:31:00.779612 1 server_others.go:189] Using ipvs Proxier.
W1110 15:31:00.779923 1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:31:00.779999 1 server_others.go:216] Tearing down inactive rules.
I1110 15:31:00.820185 1 server.go:464] Version: v1.13.3
I1110 15:31:00.824642 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:31:00.825227 1 config.go:202] Starting service config controller
I1110 15:31:00.825237 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:31:00.825247 1 config.go:102] Starting endpoints config controller
I1110 15:31:00.825249 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:31:00.925362 1 controller_utils.go:1034] Caches are synced for service config controller
I1110 15:31:00.925368 1 controller_utils.go:1034] Caches are synced for endpoints config controller
Testing from inside a container succeeds — the service ClusterIPs now answer ping:
bash-5.0$ ping 10.96.0.10
PING 10.96.0.10 (10.96.0.10) 56(84) bytes of data.
64 bytes from 10.96.0.10: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 10.96.0.10: icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from 10.96.0.10: icmp_seq=3 ttl=64 time=0.052 ms
64 bytes from 10.96.0.10: icmp_seq=4 ttl=64 time=0.055 ms
^C
--- 10.96.0.10 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.051/0.052/0.055/0.007 ms
bash-5.0$ ping zabbix-server
PING zabbix-server.default.svc.cluster.local (10.96.0.3) 56(84) bytes of data.
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=1 ttl=64 time=0.037 ms
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=3 ttl=64 time=0.047 ms
^C
--- zabbix-server.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.037/0.045/0.052/0.008 ms
bash-5.0$ ping zabbix-web
PING zabbix-web.default.svc.cluster.local (10.97.64.43) 56(84) bytes of data.
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=1 ttl=64 time=0.056 ms
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=2 ttl=64 time=0.050 ms
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=3 ttl=64 time=0.047 ms
^C
--- zabbix-web.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 4ms
rtt min/avg/max/mdev = 0.047/0.051/0.056/0.003 ms
bash-5.0$ ping mysql-server
PING mysql-server.default.svc.cluster.local (10.99.100.149) 56(84) bytes of data.
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=1 ttl=64 time=0.046 ms
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=3 ttl=64 time=0.065 ms
^C
--- mysql-server.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.046/0.054/0.065/0.010 ms
bash-5.0$ ping 10.99.100.149
PING 10.99.100.149 (10.99.100.149) 56(84) bytes of data.
64 bytes from 10.99.100.149: icmp_seq=1 ttl=64 time=0.137 ms
64 bytes from 10.99.100.149: icmp_seq=2 ttl=64 time=0.044 ms
^C
--- 10.99.100.149 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.044/0.090/0.137/0.047 ms
bash-5.0$ ping 10.96.0.3
PING 10.96.0.3 (10.96.0.3) 56(84) bytes of data.
64 bytes from 10.96.0.3: icmp_seq=1 ttl=64 time=0.057 ms
64 bytes from 10.96.0.3: icmp_seq=2 ttl=64 time=0.053 ms
^C
--- 10.96.0.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.053/0.055/0.057/0.002 ms
bash-5.0$
bash-5.0$ ping 10.97.64.43
PING 10.97.64.43 (10.97.64.43) 56(84) bytes of data.
64 bytes from 10.97.64.43: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 10.97.64.43: icmp_seq=2 ttl=64 time=0.044 ms
^C
--- 10.97.64.43 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.044/0.047/0.051/0.007 ms
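The service VIPs answer ICMP because in IPVS mode kube-proxy assigns every ClusterIP to a local dummy interface on each node, whereas in iptables mode they existed only as NAT rules. This can be inspected directly (kube-ipvs0 is the dummy interface kube-proxy creates):

ip addr show kube-ipvs0    # each service ClusterIP appears as an address on this interface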