Sync the local system time
ntpdate 210.72.145.44
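To keep the clock in sync automatically, a cron entry along these lines should work (a sketch; it assumes ntpdate lives at /usr/sbin/ntpdate, and the NTP server should be adjusted to your environment):
0 * * * * /usr/sbin/ntpdate 210.72.145.44 >/dev/null 2>&1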
Clear system caches to free up memory
free && sync && echo 3 > /proc/sys/vm/drop_caches && free
Kill zombie processes (by killing their parent processes)
kill $(ps -A -ostat,ppid | awk '/[zZ]/ && !a[$2]++ {print $2}')
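To inspect the zombies before killing their parents, something like this lists them first (column order may differ slightly between ps versions):
ps -eo stat,pid,ppid,cmd | awk '$1 ~ /^[Zz]/'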
Show all ARP resolutions (replies) on the network
tcpdump 'arp' -e -i eth0 -n -p -t |grep is-at
Replace eth0 with the name of your network interface.
Monitor traffic on a local port
tcpdump -n -vv tcp port $1 -i em1
Replace em1 with your network interface name.
Count local connections by state
netstat -nat |awk '{print $6}'|sort|uniq -c|sort -nr
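On newer systems where netstat is no longer installed, ss gives the same breakdown; an equivalent sketch:
ss -ant | awk 'NR>1 {print $1}' | sort | uniq -c | sort -nr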
Watch the Tomcat log for exceptions
tail -F /var/log/tomcat8/catalina.out |grep -E 'Exception|at' |grep -v WARN
Replace tomcat8 with your Tomcat version.
Delete Tomcat logs older than 5 days
sudo find /var/lib/tomcat8/logs/ -mtime +5 -exec rm {} \;
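It is safer to preview what will be removed first; the same find expression with -print only lists the files:
sudo find /var/lib/tomcat8/logs/ -mtime +5 -print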
Flush the memcache cache
Save the following as a script:
#!/bin/bash
# Flush the memcache cache on a given host and port
if (($# < 2)); then
echo "usage: $0 host port";
exit 1;
fi
# If arguments are missing, exit with status 1
exec 6<>/dev/tcp/$1/$2 2>/dev/null;
# Open a read/write socket to host:port on file descriptor 6
if (($? != 0)); then
echo "open $1 $2 error!";
exit 1;
fi
# If the connection failed, $? is non-zero; abort
echo -e "flush_all">&6;
echo -e "quit">&6;
# Send the flush_all and quit commands over the socket
cat<&6;
# Read the response from the socket and print it to stdout
exec 6<&-;
exec 6>&-;
# Close the socket for reading and writing
exit 0;
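Example usage, assuming the script was saved as flush_memcache.sh (a hypothetical name) and memcached listens on its default port 11211:
bash flush_memcache.sh 127.0.0.1 11211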
Change a VirtualBox VM's memory allocation
Save as a script; the first argument is the VM name, the second the memory size in megabytes (VBoxManage --memory expects MB, e.g. 2048 for 2G).
#!/bin/bash
VM=$1
VBoxManage controlvm $VM poweroff
VBoxManage modifyvm $VM --memory $2
VBoxManage startvm $VM --type headless
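Example usage, assuming the script was saved as vbox_set_mem.sh (a hypothetical name); the size is in megabytes:
bash vbox_set_mem.sh myvm 2048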
Add a disk to a VirtualBox VM
#!/bin/sh
#machine=phptest
machine=$1
VBoxManage controlvm "$machine" poweroff
disk="/home/xwx/VirtualBox VMs/$machine/${machine}_swap.vdi"
#VBoxManage createhd --filename "$disk" --size 1024
#VBoxManage storageattach "$machine" --storagectl "IDE" --port 1 --type hdd --medium $disk
#VBoxManage storageattach "$machine" --storagectl SATA --port 1 --type hdd --medium $disk
# "SATA 控制器" is the storage controller name as reported by VBoxManage showvminfo; change it to match yours (e.g. "SATA" or "SATA Controller")
VBoxManage storageattach "$machine" --storagectl "SATA 控制器" --port 1 --type hdd --medium "$disk"
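The disk image must exist before it can be attached; uncommenting the VBoxManage createhd line above creates it. Example usage, assuming the script was saved as vbox_add_disk.sh (a hypothetical name):
bash vbox_add_disk.sh phptest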
Change a cloned VM's IP address
After cloning a VM, its IP has to be changed on first boot before it can be managed remotely:
#!/bin/bash
# set modify
ip=/etc/network/interfaces
hn=/etc/hostname
netmask=255.255.255.0
network=192.168.20.0
broadcast=192.168.20.255
gateway=192.168.20.1
# modify ip, mask, gw, dns, hostname
cp $ip /etc/network/interfaces.bak
sed -ri 's/(iface eth0 inet).*/iface eth0 inet static/' /etc/network/interfaces
echo "Please input IP:"
read ipadd
if [ -n "$ipadd" ]; then
echo "address $ipadd" >> $ip
echo "Modify Completed "
else
echo "Not Modified"
fi
echo "netmask $netmask" >> $ip
echo "Netmask Modify Completed "
echo "network $network" >> $ip
echo "Network Modify Completed "
echo "broadcast $broadcast" >> $ip
echo "Broadcast Modify Completed "
echo "gateway $gateway" >> $ip
echo "Gateway Modify Completed "
echo "Please input hostname:"
read hostname
if [ -n "$hostname" ]; then
echo "$hostname" > $hn
echo "Modify Completed "
else
echo "Default Hostname"
fi
echo "All modifications completed"
read -n1 -p "Restart networking now [Y/N]? "
case $REPLY in
Y|y) echo
/etc/init.d/networking restart;;
N|n) echo
echo "Networking must be restarted for the changes to take effect!";;
esac
exit
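After the script finishes and networking restarts, the result can be verified with standard tools, for example:
ip addr show eth0
cat /etc/network/interfaces
hostname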
Real-time nginx log statistics
Use goaccess; it can usually be installed with apt install goaccess or yum install goaccess.
sudo goaccess /var/log/nginx/access.log --log-format='%h %^[%d:%t %^] "%r" %s %b "%R" "%u" "-" "%v"' --date-format='%d/%b/%Y' --time-format='%H:%M:%S'
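If the access log uses nginx's default combined format, goaccess can also write a self-updating HTML report; a sketch (the output path is only an example):
sudo goaccess /var/log/nginx/access.log --log-format=COMBINED -o /var/www/html/report.html --real-time-html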
Back up the nginx configuration
nginx configs get edited frequently, so back them up before making changes:
###################################################################
####### nginx config backup #######################################
#!/bin/sh
# -----------------------------
# the directory to store your backup files
backup_dir="/home/your/backup"
# date format for the backup file name (yyyymmdd)
time="$(date +"%Y%m%d")"
MKDIR="$(which mkdir)"
RM="$(which rm)"
MV="$(which mv)"
TAR="$(which tar)"
GZIP="$(which gzip)"
# Resolve tool paths with which for portability across systems; remove if the commands are already on PATH
# create the directory for storing backups if it does not exist
test ! -d "$backup_dir" && $MKDIR -p "$backup_dir"
# check that the backup directory is writeable
test ! -w "$backup_dir" && echo "Error: $backup_dir is un-writeable." && exit 1
$TAR -zcPf $backup_dir/$HOSTNAME.nginx.$time.tar.gz /etc/nginx
$TAR -zcPf $backup_dir/$HOSTNAME.cron_daily.$time.tar.gz /etc/cron.daily
# delete backups older than 30 days
find "$backup_dir" -name "*.gz" -mtime +30 | xargs rm -f
exit 0;
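To run the backup automatically once a day, the script can be dropped into /etc/cron.daily (use a filename without a dot, otherwise run-parts may skip it); the names below are examples:
sudo cp backup_nginx.sh /etc/cron.daily/backup-nginx
sudo chmod +x /etc/cron.daily/backup-nginx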
Have nginx automatically block IPs with excessive traffic
#!/bin/bash
nginx_home=/etc/nginx
log_path=/var/log/nginx
tail -n10000 $log_path/access.log \
|awk '{print $1,$12}' \
|grep -i -v -E "google|yahoo|baidu|msnbot|FeedSky|sogou" \
| grep -v '223.223.198.231' \
|awk '{print $1}'|sort|uniq -c|sort -rn \
|awk '{if($1>50)print "deny "$2";"}' >>./blockips.conf
sort ./blockips.conf |uniq -u >./blockips_new.conf
mv ./blockips.conf ./blockips_old.conf
mv ./blockips_new.conf ./blockips.conf
cat ./blockips.conf
#service nginx reload
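The generated deny list only takes effect if nginx loads it; assuming the script runs in /etc/nginx, adding a line like this inside the http block of nginx.conf and then reloading (or uncommenting the service nginx reload line above) should do it:
include /etc/nginx/blockips.conf;
sudo nginx -t && sudo nginx -s reload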
Monitor each site's home page
#!/bin/bash
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
function test_domain {
local domain=$1
status=`curl -s -o /dev/null -I -w "%{http_code}" $domain`
if [ "$status" -eq '404' ]
then
printf "${domain}${RED} ${status}${NC}\n"
else
printf "$domain$GREEN $status$NC\n"
fi
}
domain_list=$'bixuebihui.cn\nwww.bixuebihui.cn\ndev.bixuebihui.cn\nblog.bixuebihui.cn\nbixuebihui.com\nwww.bixuebihui.com'
while read -r domain; do
# echo "... $domain ..."
test_domain "http://$domain"
test_domain "https://$domain"
done <<< "$domain_list"
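To get notified instead of watching a terminal, the script can be run from cron and its output mailed (this assumes a local mail command is available; the path and address are examples, and the ANSI color codes will show up literally in the mail):
*/10 * * * * /path/to/check_homepages.sh 2>&1 | mail -s "site status" you@example.com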
Filter slow SQL out of the MySQL slow query log
#!/usr/bin/perl
#
# Nathanial Hendler
# http://retards.org/
#
# 2001-06-26 v1.0
#
# This perl script parses a MySQL slow_queries log file
# ignoring all queries less than $min_time and prints
# out how many times a query was greater than $min_time
# with the seconds it took each time to run. The queries
# are sorted by total time taken; the most time-consuming
# query appears at the top of the output.
#
# Usage: mysql_slow_log_parser logfile
#
# ------------------------
# SOMETHING TO THINK ABOUT (aka: how to read output)
# ------------------------
#
# Also, it does two regex substitutions to normalize
# the queries...
#
# $query_string =~ s/\d+/XXX/g;
# $query_string =~ s/(['"]).+?(['"])/$1XXX$2/g;
#
# These replace numbers with XXX and strings found in
# quotes with XXX so that the same select statement
# with different WHERE clauses will be considered
# as the same query.
#
# so these...
#
# SELECT * FROM offices WHERE office_id = 3;
# SELECT * FROM offices WHERE office_id = 19;
#
# become...
#
# SELECT * FROM offices WHERE office_id = XXX;
#
#
# And these...
#
# SELECT * FROM photos WHERE camera_model LIKE 'Nikon%';
# SELECT * FROM photos WHERE camera_model LIKE '%Olympus';
#
# become...
#
# SELECT * FROM photos WHERE camera_model LIKE 'XXX';
#
#
# ---------------------
# THIS MAY BE IMPORTANT (aka: Probably Not)
# ---------------------
#
# *SO* if you use numbers in your table names, or column
# names, you might get some oddities, but I doubt it.
# I mean, how different should the following queries be
# considered?
#
# SELECT car1 FROM autos_10;
# SELECT car54 FROM autos_11;
#
# I don't think so.
#
$min_time = 0; # Skip queries less than $min_time
$min_rows = 0;
$max_display = 10; # Truncate display if more than $max_display occurances of a query
print "\n Starting... \n";
$query_string = '';
$time = 0;
$new_sql = 0;
##############################################
# Loop Through The Logfile
##############################################
while (<>) {
# Skip Bogus Lines
next if ( m|/.*mysqld, Version:.+ started with:| );
next if ( m|Tcp port: \d+ Unix socket: .*mysql.sock| );
next if ( m|Time\s+Id\s+Command\s+Argument| );
next if ( m|administrator\s+command:| );
# print $_;
# if ( /Query_time:\s+(.*)\s+Lock_time:\s+(.*)\s/ ) {
#if ( /Query_time:\s+(.*)\s+Lock_time:\s+(.*)\s+Rows_examined:\s+(\d+)/ ) {
if ( /Query_time:\s+(.*)\s+Lock_time:\s+(.*)\s+Rows_examined:\s+(.*)/ ) {
$time = $1;
$rows = $3;
$new_sql = 1;
# print "found $1 $3\n";
next;
}
if ( /^#/ && $query_string ) {
if (($time > $min_time) && ($rows >= $min_rows)) {
$orig_query = $query_string;
$query_string =~ s/\d+/XXX/g;
$query_string =~ s/'([^'\\]*(\\.[^'\\]*)*)'/'XXX'/g;
$query_string =~ s/"([^"\\]*(\\.[^"\\]*)*)"/"XXX"/g;
#$query_string =~ s/(['"]).+?(['"])/$1XXX$2/g;
#$query_string =~ s/\s+/ /g;
#$query_string =~ s/\n+/\n/g;
push @{$queries{$query_string}}, $time;
push @{$queries_rows{$query_string}}, $rows;
$queries_tot{$query_string} += $time;
$queries_orig{$query_string} = $orig_query;
$query_string = '';
}
} else {
if ($new_sql) {
$query_string = $_;
$new_sql = 0;
} else {
$query_string .= $_;
}
}
}
##############################################
# Display Output
##############################################
foreach my $query ( sort { $queries_tot{$b} <=> $queries_tot{$a} } keys %queries_tot ) {
my $total = 0;
my $cnt = 0;
my @seconds = sort { $a <=> $b } @{$queries{$query}};
my @rows = sort { $a <=> $b } @{$queries_rows{$query}};
($total+=$_) for @seconds;
($cnt++) for @seconds;
print "### " . @{$queries{$query}} . " Quer" . ((@{$queries{$query}} > 1)?"ies ":"y ") . "\n";
print "### Total time: " . $total .", Average time: ".($total/$cnt)."\n";
print "### Taking ";
print @seconds > $max_display ? "$seconds[0] to $seconds[-1]" : sec_joiner(\@seconds);
print " seconds to complete\n";
print "### Rows analyzed ";
print @rows > $max_display ? "$rows[0] - $rows[-1]": sec_joiner(\@rows);
print "\n";
print "$query\n";
print $queries_orig{$query}."\n\n";
}
sub sec_joiner {
my ($seconds) = @_;
$string = join(", ", @{$seconds});
$string =~ s/, (\d+)$/ and $1/;
return $string;
}
exit(0);
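The parser only helps if MySQL is actually writing a slow log; it can be enabled at runtime through the standard system variables, and then the script is pointed at the log file (paths and credentials below are examples):
mysql -uroot -p -e "SET GLOBAL slow_query_log = 1; SET GLOBAL long_query_time = 1;"
mysql -uroot -p -e "SHOW VARIABLES LIKE 'slow_query_log_file';"
perl mysql_slow_log_parser /var/log/mysql/mysql-slow.log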
Local routing table (policy routing for two NICs)
ip route add 5.6.13.192/26 dev em1 src 5.6.13.218 table 10
ip route add default via 5.6.13.254 table 10
ip route add 5.6.13.192/26 dev em2 src 5.6.13.217 table 20
ip route add default via 5.6.13.254 table 20
ip route add 5.6.13.192/26 dev em1 src 5.6.13.218
ip route add 5.6.13.192/26 dev em2 src 5.6.13.217
ip route add default via 5.6.13.254
ip rule add from 5.6.13.218 table 10
ip rule add from 5.6.13.217 table 20
ip route flush cache
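The resulting policy routing can be inspected afterwards; note that these ip commands do not persist across reboots:
ip rule show
ip route show table 10
ip route show table 20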
Send alerts via DingTalk when an exception occurs
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
import json
import requests
app = Flask(__name__)
def transform(text):
textMap = json.loads(text)
nodePorturl = 'http://192.168.10.182:3672'
externalURL = textMap['externalURL']
print(externalURL)
links =[]
for alert in textMap['alerts']:
print('-------------')
time = alert['startsAt'] + ' -- ' + alert['endsAt']
generatorURL = alert['generatorURL']
generatorURL = nodePorturl+generatorURL[generatorURL.index('graph'):]
summary = alert['annotations']['summary']
description = alert['annotations']['description']
status = alert['status']
title = alert['labels']['alertname']