# free -m
Display memory usage in MB
[root@localhost ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:          11852        1250        8668         410        1934        9873
Swap:          6015           0        6015
# free -h
Display memory usage in human-readable units (GB, MB, etc.)
[root@localhost ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:            11G        1.2G        8.5G        410M        1.9G        9.6G
Swap:          5.9G          0B        5.9G
# free -t
Display memory usage with a total line (physical memory plus swap)
[root@localhost ~]# free -t
              total        used        free      shared  buff/cache   available
Mem:       12137332     1285344     8870628      420268     1981360    10105740
Swap:       6160380           0     6160380
Total:     18297712     1285344    15031008
# free -s 5
Query memory usage periodically
Here the command re-runs every 5 seconds
[root@localhost ~]# free -s 5
              total        used        free      shared  buff/cache   available
Mem:       12137332     1280796     8875008      420268     1981528    10110136
Swap:       6160380           0     6160380
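Without a count, free -s 5 keeps printing until interrupted with Ctrl-C. A minimal sketch for a bounded sample, assuming the procps-ng -c (count) option is available:
# print 3 samples, 5 seconds apart, in MB
free -m -s 5 -c 3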
Explanation:
Mem: overview of physical memory usage
Swap: swap space, i.e. memory whose contents can be pushed out to disk
shared: shared memory, mainly memory used by tmpfs
buffers: memory holding data waiting to be written out to disk (block devices)
cached: memory holding data that has been read from disk
total: total physical memory of the machine
used: memory in use
free: idle physical memory
Note: physical memory (total) = used + free + buff/cache; in the output above, 11852 = 1250 + 8668 + 1934
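The identity above can be checked directly from the free output. A minimal awk sketch, assuming the newer procps-ng column layout shown above (used, free and buff/cache are columns 3, 4 and 6 of the Mem: line):
# print used + free + buff/cache next to total; the two numbers should match
free -m | awk '/^Mem:/ {print $3 + $4 + $6, "vs total", $2}'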
# cat /proc/PID/status | grep VmRSS
[root@localhost ~]# pidof nginx
27327 27326
[root@localhost ~]# cat /proc/27327/status | grep VmRSS
VmRSS:      2652 kB
[root@localhost ~]# cat /proc/27326/status | grep VmRSS
VmRSS:      1264 kB
[root@localhost ~]# pidof java
1973
[root@localhost ~]# cat /proc/1973/status | grep VmRSS
VmRSS:   1166852 kB
From the output above, the two nginx PIDs together occupy 2652 + 1264 = 3916 kB of physical memory (RSS)
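Adding the per-PID values by hand gets tedious when a service has many workers; the sum can be computed in one pipeline. A sketch, assuming the service is nginx (substitute any process name):
#!/bin/bash
# sum VmRSS (kB) over every PID that pidof returns for the service
for pid in $(pidof nginx); do
    grep VmRSS /proc/$pid/status
done | awk '{sum += $2} END {print sum " kB"}'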
# cat mem_per.sh
[root@localhost ~]# cat mem_per.sh
#!/bin/bash
ps auxw|awk '{if (NR>1){print $4}}' > /opt/mem_list
awk '{MEM_PER+=$1}END{print MEM_PER}' /opt/mem_list
[root@localhost ~]# chmod 755 mem_per.sh
[root@localhost ~]# sh mem_per.sh
64.4
Script explanation:
ps auxw|awk '{print $3}' lists the CPU utilization of every process on this machine; the first line of the output is the "%CPU" header
ps auxw|awk '{print $4}' lists the memory utilization of every process on this machine; the first line of the output is the "%MEM" header
ps auxw|awk '{if (NR>1){print $4}}' drops that first header line (NR>1) from the "ps auxw" output and then prints the 4th column
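The temporary file /opt/mem_list is not strictly needed; the same total can be produced in a single pipeline. A sketch with identical logic:
# skip the header line (NR>1), then sum the %MEM column across all processes
ps auxw | awk 'NR>1 {mem += $4} END {print mem}'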
After running top, press P to sort by CPU usage (or M for memory) and see which process is consuming the most
# top -d 5
Query CPU usage periodically
Refresh every 5 seconds
top - 02:37:55 up 4 min,  1 user,  load average: 0.02, 0.10, 0.05
Tasks: 355 total,   1 running, 354 sleeping,   0 stopped,   0 zombie
%Cpu(s):  3.0 us,  2.8 sy,  0.0 ni, 94.2 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
# us: CPU time spent running user-space processes (not scheduled through nice)
# sy: CPU time spent in kernel (system) space
# id: idle CPU time
KiB Mem :  1868660 total,  1081340 free,   578388 used,   208932 buff/cache
KiB Swap:  4194300 total,  4194300 free,        0 used.  1123992 avail Mem

  PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND
 2220 mysql     20   0 1307796 471172   7608 S  0.6 25.2   0:02.31 mysqld
  349 root      20   0       0      0      0 S  0.4  0.0   0:01.14 kworker/0:3
  644 root      20   0       0      0      0 S  0.4  0.0   0:00.17 xfsaild/dm-0
 3489 root      20   0  146432   2268   1440 R  0.4  0.1   0:00.11 top
    1 root      20   0   44500   7120   2596 S  0.2  0.4   0:01.69 systemd
  283 root      39  19       0      0      0 S  0.2  0.0   0:00.18 khugepaged
 2621 root      20   0  141264   5140   3896 S  0.2  0.3   0:00.18 sshd
    2 root      20   0       0      0      0 S  0.0  0.0   0:00.01 kthreadd
    3 root      20   0       0      0      0 S  0.0  0.0   0:00.01 ksoftirqd/0
    4 root      20   0       0      0      0 S  0.0  0.0   0:00.00 kworker/0:0
    5 root       0 -20       0      0      0 S  0.0  0.0   0:00.00 kworker/0:0H
    6 root      20   0       0      0      0 S  0.0  0.0   0:00.02 kworker/u256:0
    7 root      rt   0       0      0      0 S  0.0  0.0   0:00.00 migration/0
    8 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcu_bh
    9 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/0
   10 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/1
   11 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/2
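For use in scripts or logs, top can also run non-interactively. A small sketch using batch mode:
# -b: batch (plain text) output, -d 5: 5-second interval, -n 2: stop after two iterations
top -b -d 5 -n 2 | head -20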
ps auxw (view the CPU and memory percentage used by each process on this machine)
Running "ps auxw" shows what percentage of CPU and memory each local process is consuming
# ps auxw | head -1
%CPU: the process's CPU usage percentage
%MEM: the process's memory usage percentage
[root@localhost ~]# ps auxw | head -1
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
View the CPU percentage used by the java process; in the example below it is 0.2%
# ps auxw |grep -v grep|grep -w java|awk '{print $3}'
[root@localhost ~]# ps auxw |grep -v grep|grep -w java|awk '{print $3}'
0.2
View the memory percentage used by the java process; in the example below it is 30.0%
# ps auxw |grep -v grep|grep -w java|awk '{print $4}'
[root@localhost ~]# ps auxw |grep -v grep|grep -w java|awk '{print $4}'
30.0
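The grep chain above can mis-fire if another command line happens to contain the word java; pgrep plus ps -o is a more robust alternative. A sketch, assuming at least one process named exactly java is running:
# -x: exact name match, -d,: comma-separated PID list; -o prints only the requested columns
ps -o pid,%cpu,%mem,comm -p $(pgrep -x -d, java)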
# cat cpu_per.sh
[root@localhost ~]# cat cpu_per.sh
#!/bin/bash
ps auxw|awk '{if (NR>1){print $3}}' > /opt/cpu_list
awk '{CPU_PER+=$1}END{print CPU_PER}' /opt/cpu_list
[root@localhost ~]# chmod 755 cpu_per.sh
[root@localhost ~]# sh cpu_per.sh
44.5
# Total cores = number of physical CPUs × cores per physical CPU
# Total logical CPUs = number of physical CPUs × cores per physical CPU × threads per core (hyper-threading)
(a combined sketch follows the "cpu cores" query below)
# cat /proc/cpuinfo| grep "processor"| wc -l
[root@localhost ~]# cat /proc/cpuinfo| grep "processor"| wc -l 6
# cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l
[root@localhost ~]# cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l 1
# lscpu
Server 1:
[root@localhost ~]# lscpu
Architecture:          x86_64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Little Endian
CPU(s):                6
On-line CPU(s) list:   0-5
Thread(s) per core:    1
Core(s) per socket:    6
Socket(s):             1
NUMA node(s):          1
Vendor ID:             GenuineIntel
CPU family:            6
Model:                 15
Model name:            Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz
Stepping:              11
CPU MHz:               2194.916
BogoMIPS:              4389.83
Hypervisor vendor:     KVM
Virtualization type:   full
L1d cache:             32K
L1i cache:             32K
L2 cache:              4096K
NUMA node0 CPU(s):     0-5
Flags:                 fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx lm constant_tsc rep_good nopl eagerfpu pni ssse3 cx16 pcid sse4_2 x2apic hypervisor lahf_lm
Server 2:
[root@bogon ~]# lscpu
Architecture:          x86_64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Little Endian
CPU(s):                8
On-line CPU(s) list:   0-7
Thread(s) per core:    1
Core(s) per socket:    8
Socket(s):             1
NUMA node(s):          1
Vendor ID:             GenuineIntel
CPU family:            6
Model:                 79
Model name:            Intel(R) Xeon(R) CPU E7-4830 v4 @ 2.00GHz
Stepping:              1
CPU MHz:               1995.192
BogoMIPS:              3990.38
Hypervisor vendor:     VMware
Virtualization type:   full
L1d cache:             32K
L1i cache:             32K
L2 cache:              256K
L3 cache:              35840K
NUMA node0 CPU(s):     0-7
Flags:                 fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts mmx fxsr sse sse2 ss ht syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts nopl xtopology tsc_reliable nonstop_tsc aperfmperf eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx hypervisor lahf_lm 3dnowprefetch epb dtherm arat pln pts
# cat /proc/cpuinfo| grep "cpu cores"| uniq
[root@localhost ~]# cat /proc/cpuinfo| grep "cpu cores"| uniq cpu cores : 6
# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +3 | head -10
[root@localhost ~]# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +3 | head -10
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root      9416  0.8  0.0 163544  6284 ?        Ss   08:38   0:00 sshd: root@pts/0
root      3783  0.7  0.0  12072  2032 ?        S    May26 357:27 /bin/bash /usr/local/VMOptimizationTools/sangfor_guest_datareport
root      9545  0.6  0.0 163224  5904 ?        Ss   08:38   0:00 sshd: root@notty
root      3966  0.3  0.0  12704  2676 ?        S    May26 183:12 /bin/bash /usr/local/VMOptimizationTools/sangfor_update_ipc_callback
root      3784  0.3  0.0  12560  2552 ?        S    May26 182:42 /bin/bash /usr/local/VMOptimizationTools/sangfor_sfping
33       10431  0.3  0.1 409704 17832 ?        S    08:38   0:00 /usr/sbin/apache2 -k start
root      3986  0.2  0.0  12452  2280 ?        S    May26 122:23 /bin/bash /usr/local/VMOptimizationTools/sangfor_vm_proxyd_w
root      3781  0.2  0.0  12740  2672 ?        S    May26 115:59 /bin/bash /usr/local/VMOptimizationTools/sangfor_vm_proxyd
500      23785  0.2  2.0 1790172 249528 ?      Ss   Jun25  11:30 oraclehelowin (LOCAL=NO)
root      4053  0.1  0.0  12508  2520 ?        S    May26  75:16 /bin/bash /usr/local/VMOptimizationTools/sangfor_watchdog
# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +4 | head -10
[root@localhost ~]# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +4 | head -10
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
500      32304  0.0  5.7 1794892 700976 ?      Ss   May31   2:53 ora_dbw0_helowin
500       4785  0.0  4.5 1797912 550132 ?      Ss   May29   4:03 ora_dbw0_helowin
500       4796  0.0  3.5 1798308 426468 ?      Ss   May29   2:11 ora_smon_helowin
500      25850  0.0  2.5 1810144 307340 ?      Ss   Jun17   0:10 oraclehelowin (LOCAL=NO)
500      32471  0.0  2.4 1810184 299704 ?      Ss   Jun14   0:30 oraclehelowin (LOCAL=NO)
500       3927  0.0  2.3 1791308 283440 ?      Ss   Jun26   0:13 oraclehelowin (LOCAL=NO)
500       5432  0.0  2.1 1794272 261692 ?      Ss   May29   7:16 ora_cjq0_helowin
500      23785  0.2  2.0 1790172 249528 ?      Ss   Jun25  11:30 oraclehelowin (LOCAL=NO)
500      19092  0.0  2.0 1793248 253080 ?      Ss   Jun21   0:28 oraclehelowin (LOCAL=NO)
500      32310  0.0  1.8 1794224 229200 ?      Ss   May31   1:25 ora_smon_helowin
View the process with the highest CPU usage
# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +3 | head -1
[root@localhost ~]# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +3 | head -1
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root      3783  0.7  0.0  12072  2032 ?        S    May26 357:32 /bin/bash /usr/local/VMOptimizationTools/sangfor_guest_datareport
Get the process with the highest memory usage
# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +4 | head -1
[root@localhost ~]# ps aux | head -1; ps aux | grep -v PID | sort -rn -k +4 | head -1
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
500      32304  0.0  5.7 1794892 700976 ?      Ss   May31   2:53 ora_dbw0_helowin
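procps ps can also sort internally, which avoids the grep/sort pipeline. A hedged sketch:
# top 10 processes by resident memory (header included); use --sort=-%cpu to rank by CPU instead
ps aux --sort=-%mem | head -11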
For example, check the start time and elapsed running time of the java process
# ps -ef|grep -v grep|grep -w java|awk '{print $2}'
# ps -eo pid,lstart,etime | grep 1973
Where:
Mon Jun 24 09:25:41 2019 is the start time of the java process
4-00:16:55 is how long the java process has been running, in days-HH:MM:SS format
[root@localhost ~]# ps -ef|grep -v grep|grep -w java|awk '{print $2}'
1973
[root@localhost ~]# ps -eo pid,lstart,etime | grep 1973
 1973 Mon Jun 24 09:25:41 2019  4-00:16:55
[root@localhost ~]# date
Fri Jun 28 09:42:48 CST 2019
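If the elapsed time is needed in seconds (for example to feed an alerting script), procps-ng ps also offers the etimes column. A sketch, reusing PID 1973 from above:
# lstart: full start timestamp, etimes: elapsed running time in whole seconds
ps -o pid,lstart,etimes -p 1973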
View the start time and elapsed running time of all processes
# ps -eo user,pid,lstart,etime,cmd
View the exact start time of the nginx processes and how long they have been running since
# ps -eo pid,lstart,etime,cmd|grep nginx
[root@nginx-proxy-client ~]# ps -eo pid,lstart,etime,cmd|grep nginx
 1418 Mon Jun 24 13:38:18 2019  3-20:21:05 nginx: master process /usr/local/nginx/sbin/nginx
 1419 Mon Jun 24 13:38:18 2019  3-20:21:05 nginx: worker process
 5543 Fri Jun 28 09:59:23 2019       00:00 grep --color=auto nginx
[root@nginx-proxy-client ~]# date
2019年 06月 28日 星期五 09:59:45 CST
# ifconfig
[root@localhost ~]# ifconfig
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.2.80  netmask 255.255.255.0  broadcast 192.168.2.255
        inet6 fe80::20c:29ff:fe4c:ff47  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:4c:ff:47  txqueuelen 1000  (Ethernet)
        RX packets 7866  bytes 632606 (617.7 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 215  bytes 31932 (31.1 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 8  bytes 400 (400.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8  bytes 400 (400.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
# ip a
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:4c:ff:47 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.80/24 brd 192.168.2.255 scope global eno16777736
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe4c:ff47/64 scope link
       valid_lft forever preferred_lft forever
# df -Th
View partitions and mount points
[root@localhost ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        15G  2.8G   13G  19% /
devtmpfs                devtmpfs  903M     0  903M   0% /dev
tmpfs                   tmpfs     913M     0  913M   0% /dev/shm
tmpfs                   tmpfs     913M  8.6M  904M   1% /run
tmpfs                   tmpfs     913M     0  913M   0% /sys/fs/cgroup
/dev/sda1               xfs       297M  115M  183M  39% /boot
tmpfs                   tmpfs     183M     0  183M   0% /run/user/0
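A common follow-up is flagging filesystems that are nearly full. A minimal awk sketch, assuming an 80% warning threshold:
# -P keeps each filesystem on one line; $5 is Use%, $6 is the mount point
df -P | awk 'NR>1 && $5+0 > 80 {print $6 " is " $5 " full"}'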
# lsblk
View disks and block devices
[root@localhost ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk
├─sda1            8:1    0  300M  0 part /boot
└─sda2            8:2    0   19G  0 part
  ├─centos-root 253:0    0   15G  0 lvm  /
  └─centos-swap 253:1    0    4G  0 lvm  [SWAP]
sr0              11:0    1    4G  0 rom
# fdisk -l
View detailed disk partition information
[root@localhost ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x0004a0a8

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048      616447      307200   83  Linux
/dev/sda2          616448    40478719    19931136   8e  Linux LVM

Disk /dev/mapper/centos-root: 16.1 GB, 16106127360 bytes, 31457280 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/mapper/centos-swap: 4294 MB, 4294967296 bytes, 8388608 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
end