k8s vs. Docker: CPU Contention Tests

Objective

  • Primarily, test how Docker and k8s allocate resources to containers when the host is under CPU pressure and when it is not

Docker

  • The results below apply only to cgroup v1; the sketch that follows shows the cgroup files behind the Docker CPU flags used in these tests
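
On cgroup v1, the Docker CPU flags used throughout these tests are just files under /sys/fs/cgroup. A minimal inspection sketch, assuming the cgroupfs driver reported by docker info below (the container name test1 is a placeholder):

# --cpus=1       -> cpu.cfs_quota_us / cpu.cfs_period_us (100000 / 100000 = 1 CPU of bandwidth)
# --cpu-shares   -> cpu.shares (relative weight, only arbitrated under contention)
# --cpuset-cpus  -> cpuset.cpus (the cores the container may run on)
CID=$(docker ps -q --filter name=test1)
cat /sys/fs/cgroup/cpu/docker/${CID}*/cpu.cfs_quota_us /sys/fs/cgroup/cpu/docker/${CID}*/cpu.cfs_period_us
cat /sys/fs/cgroup/cpu/docker/${CID}*/cpu.shares
cat /sys/fs/cgroup/cpuset/docker/${CID}*/cpuset.cpus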

Test Environment

[root@boy ~]# docker info
Client:
 Context:    default
 Debug Mode: false
 Plugins:
  app: Docker App (Docker Inc., v0.9.1-beta3)
  buildx: Docker Buildx (Docker Inc., v0.8.2-docker)
  scan: Docker Scan (Docker Inc., v0.17.0)

Server:
 Containers: 1
  Running: 0
  Paused: 0
  Stopped: 1
 Images: 1
 Server Version: 20.10.17
 Storage Driver: overlay2
  Backing Filesystem: xfs
  Supports d_type: true
  Native Overlay Diff: true
  userxattr: false
 Logging Driver: json-file
 Cgroup Driver: cgroupfs
 Cgroup Version: 1
 Plugins:
  Volume: local
  Network: bridge host ipvlan macvlan null overlay
  Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: inactive
 Runtimes: io.containerd.runtime.v1.linux runc io.containerd.runc.v2
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
 runc version: v1.1.2-0-ga916309
 init version: de40ad0
 Security Options:
  seccomp
   Profile: default
 Kernel Version: 3.10.0-1160.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 1
 Total Memory: 1.777GiB
 Name: boy
 ID: TTVL:ZALZ:XZLH:CLPA:7Z7A:VPRK:W27K:7ZEQ:RV6Q:RUOE:OYVC:HZ3O
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
  127.0.0.0/8
 Live Restore Enabled: false

WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled

Test Code

package main

import (
	"fmt"
	"log"
	"os"
	"strconv"
	"time"
)

// fibonacci computes the n-th Fibonacci number by naive double recursion.
// The exponential running time is the point: it turns the program into a
// pure CPU burner for the contention tests below.
func fibonacci(n int64) int64 {
	if n < 2 {
		return n
	}
	return fibonacci(n-2) + fibonacci(n-1)
}

func main() {
	// The single argument is the upper bound of the loop below.
	calCount, err := strconv.ParseInt(os.Args[1], 10, 64)
	if err != nil {
		log.Fatal(err.Error())
	}
	start := time.Now()
	fmt.Println("start calculating")
	var i int64
	for i = 0; i < calCount; i++ {
		fibonacci(i)
	}
	fmt.Println("done calculating, elapsed:", time.Since(start))
}

# Build
[root@k8s-master-1 test]# go build -o fabonacci main.go

Single-CPU consumption test in a container

# Start a container (the host has 4 cores)
[root@boy ~]# docker run --rm -it -v `pwd`:/root --cpus=1 --cap-add=sys_nice centos:7 bash

# Run stress with a single CPU-hogging worker
[root@a1a220901722 ~]# ./stress -c 1
stress: info: [16] dispatching hogs: 1 cpu, 0 io, 0 vm, 0 hdd
^C


# docker stats shows one CPU's worth being consumed
CONTAINER ID   NAME            CPU %     MEM USAGE / LIMIT   MEM %     NET I/O     BLOCK I/O   PIDS
e17f082c26a7   elated_napier   101.44%   636KiB / 1.777GiB   0.03%     656B / 0B   0B / 0B     6



# top shows one core's worth consumed in total, but note a significant detail: no single CPU is dedicated to the task; the load is spread at about 25% on each of the 4 cores and only sums to 1 CPU
[root@boy ~]# top -d 1
top - 10:11:00 up 14 min,  2 users,  load average: 0.41, 0.62, 0.39
Tasks: 147 total,   5 running, 142 sleeping,   0 stopped,   0 zombie
%Cpu0  : 24.8 us,  0.0 sy,  0.0 ni, 75.2 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  : 25.0 us,  0.0 sy,  0.0 ni, 75.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  : 25.3 us,  0.0 sy,  0.0 ni, 74.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  : 25.0 us,  0.0 sy,  0.0 ni, 75.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
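
This is CFS bandwidth control at work: --cpus=1 grants one CPU's worth of quota per period but does not pin the task, so the scheduler is free to spread the single stress worker across all four cores at ~25% each. A hedged sketch for watching the cap from the host (cgroupfs driver assumed); run something heavier than the quota, e.g. ./stress -c 4 under --cpus=1, and nr_throttled climbs every period:

CID=$(docker ps -q | head -1)                      # assumes the test container is the only one running
cat /sys/fs/cgroup/cpu/docker/${CID}*/cpu.stat     # nr_periods / nr_throttled / throttled_time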

Container nice priority test

# Start a container (the host has 4 cores)
[root@boy ~]# docker run --rm -it -v `pwd`:/root --cpus=1 --pid=host --cap-add=sys_nice centos:7 bash

# Start a low-priority process
	nohup nice -n 19 ./stress -c 4 &

# top shows stress still using about one core in total, again spread across all four CPUs
[root@boy ~]# top -d 1
top - 10:22:39 up 26 min,  2 users,  load average: 2.06, 0.61, 0.38
Tasks: 144 total,   5 running, 139 sleeping,   0 stopped,   0 zombie
%Cpu0  :  0.0 us,  0.0 sy, 25.7 ni, 74.3 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  0.0 us,  0.0 sy, 24.8 ni, 75.2 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :  0.0 us,  0.0 sy, 25.0 ni, 75.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :  0.0 us,  0.0 sy, 25.7 ni, 74.3 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st

# Start the higher-priority compute program
[root@39e2cfc64c08 ~]# ./fabonacci 100
start calculating


# CPU usage now: even though the fabonacci program clearly has the higher priority, it only gets about 1/4 of a CPU
[root@boy ~]# top -d 1
top - 10:24:14 up 27 min,  2 users,  load average: 1.33, 0.92, 0.52
Tasks: 145 total,   6 running, 139 sleeping,   0 stopped,   0 zombie
%Cpu0  :  0.0 us,  0.0 sy, 25.0 ni, 75.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  0.0 us,  0.0 sy, 24.0 ni, 76.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  : 27.0 us,  0.0 sy,  0.0 ni, 73.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :  0.0 us,  0.0 sy, 24.0 ni, 76.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem :  1863028 total,   991240 free,   362240 used,   509548 buff/cache
KiB Swap:  2097148 total,  2097148 free,        0 used.  1344720 avail Mem 
   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                                     
  3027 root      20   0  703080   6872    536 R  26.7  0.4   0:09.78 fabonacci                            
  3019 root      39  19    7312     96      0 R  24.8  0.0   0:32.28 stress                                
  3021 root      39  19    7312     96      0 R  24.8  0.0   0:31.55 stress                                
  3018 root      39  19    7312     96      0 R  11.9  0.0   0:29.97 stress                       
  3020 root      39  19    7312     96      0 R  11.9  0.0   0:31.48 stress 
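
The reason nice barely helps here is that CFS arbitrates nice weights per CPU runqueue: tasks sitting on different cores never compete directly, and the 1-CPU quota is simply split across whichever cores they land on. The effect can be reproduced without Docker by forcing everything onto one runqueue with taskset; a sketch, assuming the same binaries on the host:

# With all five tasks pinned to CPU0, priorities finally matter:
taskset -c 0 nice -n 19 ./stress -c 4 &
taskset -c 0 ./fabonacci 100
# Expectation: fabonacci takes nearly all of CPU0 and the niced stress workers starve,
# mirroring the docker --cpuset-cpus=0 result in the next test.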

Nice priority test with CPU pinning

# Start a container pinned to CPU 0 (the host has 4 cores)
[root@boy ~]# docker run --rm -it -v `pwd`:/root --cpuset-cpus=0 --pid=host --cap-add=sys_nice centos:7 bash


# Start a low-priority process
	nohup nice -n 19 ./stress -c 4 &

# top shows stress still using about one core, and now only on CPU0
[root@aaedd8e0fbe6 ~]# top -d 1 
top - 14:36:45 up 40 min,  0 users,  load average: 1.39, 0.94, 0.73
Tasks: 146 total,   5 running, 141 sleeping,   0 stopped,   0 zombie
%Cpu0  :  0.0 us,  1.0 sy, 99.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  0.0 us,  0.0 sy,  0.0 ni,100.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :  0.0 us,  0.0 sy,  0.0 ni,100.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :  0.0 us,  0.0 sy,  0.0 ni,100.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem :  1863028 total,  1002804 free,   350172 used,   510052 buff/cache
KiB Swap:  2097148 total,  2097148 free,        0 used.  1356744 avail Mem

# Start the higher-priority compute program
[root@39e2cfc64c08 ~]# ./fabonacci 100
start calculating


# CPU usage now: with the container pinned to a single core, the higher-priority fabonacci process gets most of the time slices
[root@boy ~]# top -d 1 
top - 10:37:53 up 41 min,  2 users,  load average: 3.90, 1.75, 1.03
Tasks: 147 total,   6 running, 141 sleeping,   0 stopped,   0 zombie
%Cpu0  : 94.0 us,  0.0 sy,  6.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  0.0 us,  1.0 sy,  0.0 ni, 99.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :  0.0 us,  0.0 sy,  0.0 ni,100.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :  0.0 us,  0.0 sy,  0.0 ni,100.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem :  1863028 total,   998604 free,   354348 used,   510076 buff/cache
KiB Swap:  2097148 total,  2097148 free,        0 used.  1352580 avail Mem 
   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                                        
  3225 root      20   0  702824   4836    536 R  94.1  0.3   0:24.48 fabonacci                                                      
  1583 root      20   0 1181952  52456  20036 S   2.0  2.8   0:06.30 containerd                                                     
  3212 root      39  19    7312    100      0 R   2.0  0.0   0:17.31 stress                                                         
  3213 root      39  19    7312    100      0 R   2.0  0.0   0:17.31 stress                                                         
  3214 root      39  19    7312    100      0 R   2.0  0.0   0:17.31 stress                                                         
  3215 root      39  19    7312    100      0 R   1.0  0.0   0:17.31 stress 

Single-process CPU contention test

  • Containers test1, test2 and test3 get the same CPU weight and compete for CPU; in theory, all three containers should consume the same amount
# Start containers test1, test2 and test3
	docker run --rm -it --name test1 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash
	docker run --rm -it --name test2 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash
	docker run --rm -it --name test3 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash

# Run the fabonacci program in each of test1, test2 and test3
	./fabonacci 100

# docker stats: in this case the three containers consume the same amount of CPU
CONTAINER ID   NAME      CPU %     MEM USAGE / LIMIT     MEM %     NET I/O      BLOCK I/O         PIDS
5848e427230a   test3     99.78%    6.73MiB / 1.777GiB    0.37%     656B / 0B    0B / 0B           6
3dc90ba71b99   test2     99.96%    6.625MiB / 1.777GiB   0.36%     656B / 0B    0B / 0B           5
14cc70fbbf15   test1     100.19%   8.418MiB / 1.777GiB   0.46%     1.1kB / 0B   14.2MB / 24.6kB   6

Multi-process CPU contention test

  • Containers test1, test2 and test3 again get the same CPU weight and compete for CPU; in theory, the three containers should still consume the same amount
# Start containers test1, test2 and test3
	docker run --rm -it --name test1 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash
	docker run --rm -it --name test2 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash
	docker run --rm -it --name test3 -v `pwd`:/root --cpuset-cpus=0,1,2 --cpu-shares=1024 --pid=host --cap-add=sys_nice centos:7 bash

# In container test1, run three CPU-hogging workers
	./stress -c 3

# In container test2, run two CPU-hogging workers
	./stress -c 2
	
# In container test3, run one CPU-hogging worker
	./stress -c 1
	

# docker stats: although all three containers compete for the same three CPUs with equal weights, the single-process container clearly loses out to the multi-process ones
CONTAINER ID   NAME      CPU %     MEM USAGE / LIMIT     MEM %     NET I/O      BLOCK I/O         PIDS
5848e427230a   test3     74.78%    752KiB / 1.777GiB     0.04%     656B / 0B    0B / 0B           3
3dc90ba71b99   test2     119.89%   668KiB / 1.777GiB     0.04%     656B / 0B    0B / 0B           4
14cc70fbbf15   test1     104.92%   3.078MiB / 1.777GiB   0.17%     1.1kB / 0B   17.1MB / 24.6kB   5
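
docker stats reports an instantaneous rate that bounces around; for a steadier comparison, diff each container's cumulative CPU time from the cpuacct controller. A sketch, assuming cgroup v1 with the cgroupfs driver:

for name in test1 test2 test3; do
    cid=$(docker ps -q --filter name=$name)
    before=$(cat /sys/fs/cgroup/cpuacct/docker/${cid}*/cpuacct.usage)
    sleep 10
    after=$(cat /sys/fs/cgroup/cpuacct/docker/${cid}*/cpuacct.usage)
    # cpuacct.usage is cumulative CPU time in nanoseconds;
    # the delta over 10s divided by 1e8 is the average CPU percentage
    echo "$name: $(( (after - before) / 100000000 ))%"
done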

Summary

  • Under Docker defaults without CPU pinning, if an application has multiple processes at different priorities, the low-priority processes can still steal CPU from the main process and reduce its throughput
  • With CPU pinning and equal weights, a single process clearly loses the CPU competition to multiple processes

K8S Tests

  • The tests below apply only to cgroup v1; the sketch that follows shows how requests and limits map to the cgroup knobs
  • The host has 4 CPUs
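
On cgroup v1 the kubelet translates these fields into the same knobs Docker uses: the CPU request becomes cpu.shares (1 CPU = 1024 shares, so request 0.1 ≈ 102 shares) and the CPU limit becomes a CFS quota (limit 1 = 100000us out of a 100000us period). A sketch for checking a pod's values from the node; the exact path depends on QoS class and cgroup driver (kubepods/ with cgroupfs, kubepods.slice with systemd), and <pod-uid> is a placeholder:

cat /sys/fs/cgroup/cpu/kubepods/burstable/pod<pod-uid>/cpu.shares
cat /sys/fs/cgroup/cpu/kubepods/burstable/pod<pod-uid>/cpu.cfs_quota_us
# or simply list what the kubelet created:
find /sys/fs/cgroup/cpu/kubepods -maxdepth 2 -name cpu.shares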

Pod nice test

apiVersion: v1    
kind: Pod         
metadata:         
  name: test1    
spec:             
  containers:     
  - name: test1  
    image: centos:7    
    command: ["/bin/sh","-c","sleep 100000"]    
    volumeMounts:    
    - name: test    
      mountPath: /root   
    resources:
      limits:
        cpu: "1"
      requests:
        cpu: "1"
  volumes:        
  - name: test    
    hostPath:     
      path: /root 
---
apiVersion: v1    
kind: Pod         
metadata:         
  name: test2
spec:             
  containers:     
  - name: test2   
    image: centos:7    
    command: ["/bin/sh","-c","sleep 100000"]    
    volumeMounts:    
    - name: test    
      mountPath: /root   
    resources:
      limits:
        cpu: "1"
      requests:
        cpu: "1"
  volumes:        
  - name: test    
    hostPath:     
      path: /root
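
A quick sketch of how the pods are driven, assuming the two manifests above are saved as pods.yaml (hypothetical filename); the hostPath volume exposes the host's /root, so the stress and fabonacci binaries built earlier are visible inside each pod:

kubectl apply -f pods.yaml
kubectl exec -it test1 -- bash    # then work out of /root inside the pod
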
# Inside the pod, start a low-priority process
	nohup nice -n 19 ./stress -c 4 &

# CPU usage
Tasks:   8 total,   5 running,   3 sleeping,   0 stopped,   0 zombie
%Cpu0  :  1.0 us,  0.0 sy, 99.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  2.0 us,  1.0 sy, 97.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :  2.0 us,  0.0 sy, 98.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :  0.0 us,  1.0 sy, 99.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 11486220 free,  1860516 used,  3023536 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14271044 avail Mem 
   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                              
    33 root      39  19    7320    100      0 R  98.0  0.0   0:18.54 stress                                                         
    32 root      39  19    7320    100      0 R  97.0  0.0   0:18.45 stress                                                         
    35 root      39  19    7320    100      0 R  96.0  0.0   0:18.45 stress                                                         
    34 root      39  19    7320    100      0 R  95.0  0.0   0:18.45 stress                                                        
     1 root      20   0    4372    692    620 S   0.0  0.0   0:00.00 sleep                                                         
     7 root      20   0   11968   3172   2648 S   0.0  0.0   0:00.01 bash                                                           
    31 root      39  19    7320    852    768 S   0.0  0.0   0:00.00 stress                                                         
    37 root      20   0   56192   3824   3232 R   0.0  0.0   0:00.00 top 


# Start the higher-priority compute program
[root@39e2cfc64c08 ~]# ./fabonacci 100
start calculating


# CPU usage now: fabonacci clearly has the higher priority and the pod has access to 4 CPUs, yet fabonacci only gets one CPU, not what we expected (as a single-threaded process it can never use more than one core, though on its own core it does fully preempt the niced stress workers)
Tasks:  10 total,   6 running,   4 sleeping,   0 stopped,   0 zombie
%Cpu0  :  0.0 us,  0.0 sy,100.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :  0.0 us,  0.0 sy,100.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :  1.0 us,  0.0 sy, 99.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 11424908 free,  1921080 used,  3024284 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14210744 avail Mem 
   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                         
    58 root      20   0  702832   6968    736 R  99.0  0.0   0:10.69 fabonacci                                                 
    33 root      39  19    7320    100      0 R  97.0  0.0   1:58.90 stress                                                             
    34 root      39  19    7320    100      0 R  70.0  0.0   1:59.12 stress                                                         
    35 root      39  19    7320    100      0 R  66.0  0.0   1:57.55 stress                                                       
    32 root      39  19    7320    100      0 R  64.0  0.0   1:58.08 stress                                                     
     1 root      20   0    4372    692    620 S   0.0  0.0   0:00.00 sleep                                                   
     7 root      20   0   11968   3172   2648 S   0.0  0.0   0:00.01 bash                                                         
    31 root      39  19    7320    852    768 S   0.0  0.0   0:00.00 stress                                                       
    41 root      20   0   11968   3020   2664 S   0.0  0.0   0:00.00 bash
    62 root      20   0   56192   3896   3304 R   0.0  0.0   0:00.00 top    

request != limit

  • Set request to 0.1 and limit to 1 for both test1 and test2
 # On the host, run stress with 4 CPU-hogging workers
 	./stress -c 4
 
 # In test1, run stress with 4 workers
 	./stress-test1 -c 4
 
 # In test2, run stress with a single worker
	./stress-test2 -c 1

# CPU usage: both pods are capped by their limit of 1; stress-test1's four workers share about one CPU, stress-test2's single worker gets a full core, and the host stress takes the remaining ~2 CPUs
Tasks: 225 total,  10 running, 140 sleeping,   0 stopped,   0 zombie
%Cpu(s): 99.8 us,  0.2 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 13667676 free,  1285744 used,  1416852 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14863644 avail Mem
   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
 33027 root      20   0    7320    100      0 R 100.0  0.0   0:31.96 stress-test2
 32872 root      20   0    7320    100      0 R  57.4  0.0   0:32.44 stress
 32869 root      20   0    7320    100      0 R  49.5  0.0   0:32.81 stress
 32870 root      20   0    7320    100      0 R  48.5  0.0   0:34.62 stress
 32871 root      20   0    7320    100      0 R  42.6  0.0   0:32.80 stress
 32943 root      20   0    7320     96      0 R  25.7  0.0   0:10.75 stress-test1 
 32944 root      20   0    7320     96      0 R  25.7  0.0   0:10.13 stress-test1
 32945 root      20   0    7320     96      0 R  25.7  0.0   0:09.93 stress-test1
 32946 root      20   0    7320     96      0 R  24.8  0.0   0:11.74 stress-test1 
  • Set request to 0.1 and limit to 3 for both test1 and test2
 # On the host, run stress with 4 CPU-hogging workers
 	./stress -c 4
 
 # In test1, run stress with 4 workers
 	./stress-test1 -c 4
 
 # In test2, run stress with a single worker
	./stress-test2 -c 1

# CPU usage: stress gets about 1 CPU, stress-test1 about 2 CPUs, and stress-test2 about 1 CPU
Tasks: 234 total,  10 running, 140 sleeping,   0 stopped,   0 zombie
%Cpu(s): 99.5 us,  0.5 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 13632096 free,  1292680 used,  1445496 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14850400 avail Mem 

   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND 
 40349 root      20   0    7320    100      0 R 100.0  0.0   0:36.90 stress-test2
 40302 root      20   0    7320    100      0 R  62.7  0.0   0:22.43 stress-test1
 40299 root      20   0    7320    100      0 R  54.9  0.0   0:22.14 stress-test1
 40300 root      20   0    7320    100      0 R  39.2  0.0   0:23.71 stress-test1
 40301 root      20   0    7320    100      0 R  39.2  0.0   0:23.62 stress-test1
 40245 root      20   0    7320    100      0 R  28.4  0.0   0:16.52 stress
 40246 root      20   0    7320    100      0 R  25.5  0.0   0:16.27 stress
 40247 root      20   0    7320    100      0 R  22.5  0.0   0:16.09 stress
 40244 root      20   0    7320    100      0 R  21.6  0.0   0:16.04 stress 
  • Set request to 0.1 and limit to 4 for both test1 and test2
 # On the host, run stress with 4 CPU-hogging workers
 	./stress -c 4
 
 # In test1, run stress with 4 workers
 	./stress-test1 -c 4
 
 # In test2, run stress with a single worker
	./stress-test2 -c 1

# CPU usage: stress got about 1.1 CPUs, stress-test2 about 1 CPU, and stress-test1 about 1.7 CPUs; under host CPU pressure, the multi-process pod grabs more CPU
Tasks: 222 total,  10 running, 140 sleeping,   0 stopped,   3 zombie
%Cpu0  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  : 99.0 us,  1.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 13660348 free,  1289136 used,  1420788 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14860120 avail Mem 

   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                                  
 35928 root      20   0    7320     96      0 R  99.0  0.0   0:49.13 stress-test2 
 35699 root      20   0    7320     96      0 R  52.5  0.0   0:33.98 stress
 35904 root      20   0    7320     96      0 R  51.5  0.0   0:24.51 stress-test1
 35905 root      20   0    7320     96      0 R  44.6  0.0   0:25.86 stress-test1
 35902 root      20   0    7320     96      0 R  40.6  0.0   0:24.63 stress-test1
 35903 root      20   0    7320     96      0 R  38.6  0.0   0:25.77 stress-test1
 35697 root      20   0    7320     96      0 R  23.8  0.0   0:34.52 stress 
 35698 root      20   0    7320     96      0 R  21.8  0.0   0:34.05 stress
 35700 root      20   0    7320     96      0 R  21.8  0.0   0:34.25 stress
  • Set request to 1.5 and limit to 4 for both test1 and test2
 # On the host, run stress with 4 CPU-hogging workers
 	./stress -c 4
 
 # In test1, run stress with 4 workers
 	./stress-test1 -c 4
 
 # In test2, run stress with a single worker
	./stress-test2 -c 1

# CPU usage: stress ≈ 1 CPU, stress-test1 ≈ 1.9 CPUs, stress-test2 ≈ 1 CPU; stress-test2 falls short of its 1.5 request (a single process can occupy at most one core, so it can never consume more than 1 CPU no matter what it requests)
Tasks: 232 total,  10 running, 140 sleeping,   0 stopped,   0 zombie
%Cpu0  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  : 99.0 us,  1.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 13622340 free,  1299036 used,  1448896 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14845260 avail Mem 

   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                                                                                                
 46417 root      20   0    7320    100      0 R 100.0  0.0   0:46.03 stress-test2
 46373 root      20   0    7320     96      0 R  59.4  0.0   0:26.35 stress-test1
 46370 root      20   0    7320     96      0 R  50.5  0.0   0:27.54 stress-test1
 46372 root      20   0    7320     96      0 R  40.6  0.0   0:27.65 stress-test1
 46371 root      20   0    7320     96      0 R  39.6  0.0   0:24.50 stress-test1
 46338 root      20   0    7320    100      0 R  27.7  0.0   0:17.54 stress
 46340 root      20   0    7320    100      0 R  27.7  0.0   0:17.46 stress 
 46339 root      20   0    7320    100      0 R  25.7  0.0   0:17.21 stress
 46337 root      20   0    7320    100      0 R  24.8  0.0   0:17.46 stress 

request = limit

  • Set both request and limit to 1.5 for test1 and test2
 # On the host, run stress with 4 CPU-hogging workers
 	./stress -c 4
 
 # In test1, run stress with 4 workers
 	./stress-test1 -c 4
 
 # In test2, run stress with a single worker
	./stress-test2 -c 1

# CPU usage: stress ≈ 1.5 CPUs, stress-test2 ≈ 1 CPU, stress-test1 ≈ 1.5 CPUs
Tasks: 218 total,  11 running, 141 sleeping,   0 stopped,   0 zombie
%Cpu0  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu1  : 99.0 us,  1.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu2  :100.0 us,  0.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
%Cpu3  : 99.0 us,  1.0 sy,  0.0 ni,  0.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem : 16370272 total, 13623276 free,  1293412 used,  1453584 buff/cache
KiB Swap:        0 total,        0 free,        0 used. 14852812 avail Mem 

   PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND 
 51034 root      20   0    7320    100      0 R 100.0  0.0   2:14.14 stress-test2
 50990 root      20   0    7320     96      0 R  46.0  0.0   0:51.47 stress-test1
 50954 root      20   0    7320    100      0 R  44.0  0.0   0:57.17 stress
 50956 root      20   0    7320    100      0 R  40.0  0.0   0:57.68 stress
 50987 root      20   0    7320     96      0 R  38.0  0.0   0:53.83 stress-test1
 50989 root      20   0    7320     96      0 R  38.0  0.0   0:52.03 stress-test1
 50955 root      20   0    7320    100      0 R  32.0  0.0   0:56.37 stress
 50988 root      20   0    7320     96      0 R  32.0  0.0   0:50.74 stress-test1
 50953 root      20   0    7320    100      0 R  30.0  0.0   0:57.28 stress 
 
 CONTAINER ID   NAME                                                                                                    CPU %     MEM USAGE / LIMIT     MEM %     NET I/O   BLOCK I/O     PIDS
626d0818a06d   k8s_test2_test2_default_97f70ea9-f24e-4893-9a8f-be719fe32a13_0                                          100.12%   3.184MiB / 15.61GiB   0.02%     0B / 0B   0B / 0B       4
b62746e38859   k8s_test1_test1_default_58055750-b529-432a-87b1-5656a32d10d6_0                                          148.64%   3.258MiB / 15.61GiB   0.02%     0B / 0B   0B / 0B       10

Summary

  • If a container holds multiple processes, low-priority ones can steal CPU from the main process and reduce its throughput (workaround: pin the container to specific CPUs; see the sketch after this list)
  • When the host is not under CPU pressure, k8s can give every pod its requested resources and keep each pod under its limit
  • When the host is under CPU pressure, pods with more processes grab more CPU even when their requests and limits are identical, and k8s can no longer guarantee that a pod actually receives its full request
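
For the pinning workaround on Kubernetes, the kubelet's static CPU manager policy is the built-in mechanism: set cpuManagerPolicy: static in the kubelet configuration, and a Guaranteed pod that requests an integer number of CPUs (requests == limits, e.g. cpu: "2") gets exclusive, pinned cores. A sketch for verifying the resulting affinity from the node; <pid> is a placeholder for the container's main process:

grep Cpus_allowed_list /proc/<pid>/status    # the exact cores the task may run on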
