InfiniBand Network Performance Testing

1. Bandwidth Test

Run on the server:
[ibtests]# ib_send_bw -a -c UD -d mlx4_0 -i 1
------------------------------------------------------------------
                    Send BW Test
Connection type : UD
Inline data is used up to 1 bytes message
  local address:  LID 0x0b, QPN 0x28004c, PSN 0xfaa100
  remote address: LID 0x02, QPN 0x70004b, PSN 0xc14da8
Mtu : 2048
------------------------------------------------------------------
 #bytes #iterations    BW peak[MB/sec]    BW average[MB/sec]
------------------------------------------------------------------

 

Run on the client:
[ibtests]# ib_send_bw -a -c UD -d mlx4_0 -i 1 10.10.11.8
------------------------------------------------------------------
                    Send BW Test
Connection type : UD
Inline data is used up to 1 bytes message
  local address:  LID 0x02, QPN 0x70004b, PSN 0xc14da8
  remote address: LID 0x0b, QPN 0x28004c, PSN 0xfaa100
Mtu : 2048
------------------------------------------------------------------
 #bytes #iterations    BW peak[MB/sec]    BW average[MB/sec]
      2        1000               7.51                  7.21
      4        1000              15.29                 14.19
      8        1000              30.66                 30.45
     16        1000              60.33                 59.95
     32        1000             119.53                113.20
     64        1000             233.75                233.16
    128        1000             414.95                413.64
    256        1000             794.90                698.20
    512        1000            1600.46                774.67
   1024        1000            2011.81                804.29
   2048        1000            2923.29               2919.91
------------------------------------------------------------------
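With UD transport the message size cannot exceed the path MTU (2048 bytes here), which is why the -a sweep stops at 2048. A minimal sketch, assuming the same perftest suite, for sweeping larger messages over RC and for measuring RDMA read/write bandwidth (adjust the device, port, and server IP to your setup):

# Server side
[ibtests]# ib_send_bw -a -c RC -d mlx4_0 -i 1
# Client side: with RC the -a sweep goes up to 8 MB messages
[ibtests]# ib_send_bw -a -c RC -d mlx4_0 -i 1 10.10.11.8

# RDMA read / write bandwidth, same server/client pattern
[ibtests]# ib_read_bw -a -d mlx4_0 -i 1 10.10.11.8
[ibtests]# ib_write_bw -a -d mlx4_0 -i 1 10.10.11.8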

 

2. Latency Test
Run on the server:
[ibtests]# ib_send_lat -a -c UD -d mlx4_0 -i 1
------------------------------------------------------------------
                    Send Latency Test
Inline data is used up to 400 bytes message
Connection type : UD
   local address: LID 0x0b QPN 0x2c004c PSN 0xa1be86
  remote address: LID 0x02 QPN 0x74004b PSN 0x6ea837
------------------------------------------------------------------
 #bytes #iterations    t_min[usec]    t_max[usec]  t_typical[usec]
      2        1000           1.41           4.45             1.43
      4        1000           1.41           3.84             1.43
      8        1000           1.41           2.75             1.43
     16        1000           1.41           3.01             1.42
     32        1000           1.49           3.92             1.50
     64        1000           1.55           3.96             1.57
    128        1000           1.70           2.58             1.71
    256        1000           2.41           5.73             2.45
    512        1000           2.82           4.07             2.90
   1024        1000           3.28           4.95             3.31
   2048        1000           4.11          11.74             4.14
------------------------------------------------------------------

  

Run on the client:
[ibtests]# ib_send_lat -a -c UD -d mlx4_0 -i 2 10.10.11.8
------------------------------------------------------------------
                    Send Latency Test
Inline data is used up to 400 bytes message
Connection type : UD
   local address: LID 0x02 QPN 0x74004b PSN 0x6ea837
  remote address: LID 0x0b QPN 0x2c004c PSN 0xa1be86
------------------------------------------------------------------
 #bytes #iterations    t_min[usec]    t_max[usec]  t_typical[usec]
      2        1000           1.41           9.97             1.43
      4        1000           1.38           5.31             1.43
      8        1000           1.41           2.78             1.43
     16        1000           1.40           4.01             1.42
     32        1000           1.49           3.67             1.50
     64        1000           1.55           5.20             1.56
    128        1000           1.69           3.13             1.71
    256        1000           2.40           5.72             2.45
    512        1000           2.83           4.13             2.90
   1024        1000           3.28           4.95             3.31
   2048        1000           4.11          11.68             4.14
------------------------------------------------------------------
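Latency results are sensitive to CPU frequency scaling and process migration. A minimal sketch that pins both ends of the test to a single core with taskset (core 0 is an assumption here; prefer a core on the HCA's NUMA node, see the tuning section below):

# Server side
[ibtests]# taskset -c 0 ib_send_lat -a -c UD -d mlx4_0 -i 1
# Client side
[ibtests]# taskset -c 0 ib_send_lat -a -c UD -d mlx4_0 -i 1 10.10.11.8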

 

3. Other Test Tools

#qperf
# Run on the server
[root@server ~]# qperf

# Run on the client
[root@client ~]# qperf 172.26.2.41 ud_lat ud_bw rc_rdma_read_bw rc_rdma_write_bw uc_rdma_write_bw tcp_bw tcp_lat udp_bw udp_lat
ud_lat:
    latency  =  4.41 us
ud_bw:
    send_bw  =  2.63 GB/sec
    recv_bw  =  2.63 GB/sec
rc_rdma_read_bw:
    bw  =  3.31 GB/sec
rc_rdma_write_bw:
    bw  =  3.41 GB/sec
uc_rdma_write_bw:
    send_bw  =  3.4 GB/sec
    recv_bw  =  3.36 GB/sec
tcp_bw:
    bw  =  2.11 GB/sec
tcp_lat:
    latency  =  8.56 us
udp_bw:
    send_bw  =  2.84 GB/sec
    recv_bw  =  699 MB/sec
udp_lat:
    latency  =  8.03 us
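Each qperf test runs for only a couple of seconds by default, so longer runs smooth out jitter. A sketch using qperf's -t option to run each test for 60 seconds (server IP as above):

[root@client ~]# qperf -t 60 172.26.2.41 ud_bw tcp_bw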

#iperf3
# Run on the server
[root@server ~]# iperf3 -s -p 10081

# Run on the client
[tpsa@client ~]$ iperf3 -c 172.26.2.41 -t 300 -p 10081
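A single TCP stream over IPoIB often cannot saturate the link. A sketch using iperf3's -P flag for parallel streams and -R to reverse the direction (same server command as above):

[tpsa@client ~]$ iperf3 -c 172.26.2.41 -t 300 -p 10081 -P 4
[tpsa@client ~]$ iperf3 -c 172.26.2.41 -t 300 -p 10081 -R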

  

 

4. Network Tuning

 

# Enable connected mode (datagram is the default; datagram mode gives lower latency, connected mode gives higher bandwidth). Switching roughly doubles interface throughput.
echo connected > /sys/class/net/ib0/mode
or
sed -i 's/SET_IPOIB_CM=.*/SET_IPOIB_CM=yes/' /etc/infiniband/openib.conf
/etc/init.d/openibd restart
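To verify the switch and actually benefit from it (connected mode allows an IPoIB MTU of up to 65520 bytes, versus 2044 in datagram mode):

cat /sys/class/net/ib0/mode   # should now print "connected"
ip link set ib0 mtu 65520     # raise the MTU; only valid in connected mode
ip link show ib0 | grep mtu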
# System parameter tuning (CentOS 7)
systemctl status tuned.service        # check whether the tuned service is running
tuned-adm profile network-throughput  # optimize for network throughput
tuned-adm profile network-latency     # optimize for network latency
tuned-adm active                      # show the currently active profile
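To see which profiles are available on the system before picking one:

tuned-adm list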
# Stop the irqbalance service
# systemctl stop irqbalance && systemctl disable irqbalance

# Find which NUMA node the IB interface is attached to
#numa_num=$(cat /sys/class/net/ib0/device/numa_node)

# Bind the IB NIC's interrupts to cores on that NUMA node
#/usr/sbin/set_irq_affinity_bynode.sh $numa_num ib0
# The script ships with the Mellanox OFED kernel package:
[root@server ~]# rpm -qf /usr/sbin/set_irq_affinity_bynode.sh
mlnx-ofa_kernel-3.3-OFED.3.3.1.0.0.1.gf583963.rhel7u2.x86_64
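If the script is unavailable, the binding can be done by hand by writing a hex CPU mask into /proc/irq/<n>/smp_affinity. A sketch for one of the IRQ numbers listed below (mask 1000 selects CPU 12):

echo 1000 > /proc/irq/100/smp_affinity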

# Verify the binding
# List the interrupt numbers used by ib0
[root@server ~]# ls /sys/class/net/ib0/device/msi_irqs
100  102  104  55  57  59  61  63  65  67  69  71  75  77  79  81  83  85  87  89  91  93  95  97  99
101  103  54   56  58  60  62  64  66  68  70  74  76  78  80  82  84  86  88  90  92  94  96  98

# Check the smp_affinity value of a given interrupt
[root@server ~]# cat /proc/irq/100/smp_affinity
0000,00001000

# Compare against the default affinity mask
[root@server ~]# cat /proc/irq/default_smp_affinity
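To dump the affinity of every ib0 interrupt in one pass, a small loop over the same sysfs directory:

for irq in $(ls /sys/class/net/ib0/device/msi_irqs); do
    printf 'IRQ %s -> %s\n' "$irq" "$(cat /proc/irq/$irq/smp_affinity)"
done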

 

# Alternatively, Mellanox ships a tool that applies these optimizations automatically
# mlnx_tune -h

Usage: mlnx_tune [options]

Options:
  -h, --help            show this help message and exit
  -d, --debug_info      dump system debug information without setting a
                        profile
  -r, --report          Report HW/SW status and issues without setting a
                        profile
  -c, --colored         Switch using colored/monochromed status reports. Only
                        applicable with --report
  -p PROFILE, --profile=PROFILE
                        Set profile and run it. choose from:
                        ['HIGH_THROUGHPUT',
                        'IP_FORWARDING_MULTI_STREAM_THROUGHPUT',
                        'IP_FORWARDING_MULTI_STREAM_PACKET_RATE',
                        'IP_FORWARDING_SINGLE_STREAM',
                        'IP_FORWARDING_SINGLE_STREAM_0_LOSS',
                        'IP_FORWARDING_SINGLE_STREAM_SINGLE_PORT',
                        'LOW_LATENCY_VMA']
  -q, --verbosity       print debug information to the screen [default False]
  -v, --version         print tool version and exit [default False]
  -i INFO_FILE_PATH, --info_file_path=INFO_FILE_PATH
                        info_file path. [default %s]

 

# Show the current configuration status
# mlnx_tune -r

# Apply the optimization
# mlnx_tune -p HIGH_THROUGHPUT

[root@server ~]# rpm -qf `which mlnx_tune`
mlnx-ofa_kernel-3.3-OFED.3.3.1.0.0.1.gf583963.rhel7u2.x86_64

  

5. Viewing Interface Information

[root@gz-cs-gpu-3-8 eden]# ibstat
CA 'mlx4_0'
        CA type: MT26428
        Number of ports: 1
        Firmware version: 2.9.1000
        Hardware version: b0
        Node GUID: 0x0002c9030059ddda
        System image GUID: 0x0002c9030059dddd
        Port 1:
                State: Active
                Physical state: LinkUp
                Rate: 40
                Base lid: 58
                LMC: 0
                SM lid: 1
                Capability mask: 0x02510868
                Port GUID: 0x0002c9030059dddb
                Link layer: InfiniBand
[root@gz-cs-gpu-3-8 eden]# ibstatus
Infiniband device 'mlx4_0' port 1 status:
        default gid:     fe80:0000:0000:0000:0002:c903:0059:dddb
        base lid:        0x3a
        sm lid:          0x1
        state:           4: ACTIVE
        phys state:      5: LinkUp
        rate:            40 Gb/sec (4X QDR)
        link_layer:      InfiniBand
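ibv_devinfo from libibverbs reports a similar per-device, per-port summary and is handy for cross-checking:

[root@server ~]# ibv_devinfo -d mlx4_0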

 

InfiniBand Link   Signal Pairs   Signaling Rate            Data Rate (Full Duplex)
1X-SDR            2              2.5 Gbps                  2.0 Gbps
4X-SDR            8              10 Gbps (4 x 2.5 Gbps)    8 Gbps (4 x 2 Gbps)
12X-SDR           24             30 Gbps (12 x 2.5 Gbps)   24 Gbps (12 x 2 Gbps)
1X-DDR            2              5 Gbps                    4.0 Gbps
4X-DDR            8              20 Gbps (4 x 5 Gbps)      16 Gbps (4 x 4 Gbps)
12X-DDR           24             60 Gbps (12 x 5 Gbps)     48 Gbps (12 x 4 Gbps)
1X-QDR            2              10 Gbps                   8.0 Gbps
4X-QDR            8              40 Gbps (4 x 10 Gbps)     32 Gbps (4 x 8 Gbps)
12X-QDR           24             120 Gbps (12 x 10 Gbps)   96 Gbps (12 x 8 Gbps)
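The data-rate column follows from 8b/10b line encoding: every 8 data bits are sent as 10 signaling bits, so the 4X QDR port above signals at 4 x 10 Gbps = 40 Gbps on the wire but delivers 40 x 8/10 = 32 Gbps of payload, matching the "rate: 40 Gb/sec (4X QDR)" reported by ibstatus.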

Reposted from: https://www.cnblogs.com/edenlong/p/10273433.html
