#!/bin/bash
# Rotate nginx logs: prune day-old archives, snapshot the live access/error
# logs with an hourly timestamp, truncate them in place, then tell nginx to
# reopen its log files so it stops writing through the old descriptors.
log_path="/home/gongyong/nginx/logs"
cutDate=$(date +%Y-%m-%d_%H)
nginx_cmd="/home/gongyong/nginx/bin/nginx"
shellScriptPath="/home/gongyong/crontab"

# Bail out before touching anything if the log directory is missing
# (the original only checked this AFTER the purge had already run).
if [ ! -d "${log_path}" ]; then
    echo "${log_path} not exist" > "${shellScriptPath}/nginx_log_cut.log.error"
    exit 1
fi

echo "${cutDate} start delete old log and cut current log" > "${shellScriptPath}/nginx_log_cut.log"

# Delete rotated access/error logs older than one day. Reading the list
# line-by-line (instead of word-splitting an unquoted command substitution)
# keeps filenames containing spaces intact.
find "${log_path}" -mtime +1 -type f -print | grep -E "access|error" |
while IFS= read -r toDeleteFile; do
    echo "${cutDate} ${toDeleteFile} will be deleted" >> "${shellScriptPath}/nginx_log_cut.log"
    rm -f -- "${toDeleteFile}"
done

# Copy-then-truncate rotation: snapshot each live log, then empty it in place.
cp "${log_path}/access.log" "${log_path}/access.log.${cutDate}" && : > "${log_path}/access.log"
cp "${log_path}/error.log" "${log_path}/error.log.${cutDate}" && : > "${log_path}/error.log"

# Ask the master process to reopen its log files (equivalent to sending USR1).
# BUG FIX: the prefix was misspelled "/home/gongyi/nginx/"; it must match the
# install prefix used everywhere else in this script, or reopen fails.
"${nginx_cmd}" -s reopen -p /home/gongyong/nginx/
向 nginx 主进程发送 USR1 信号(即 `nginx -s reopen`),让它重新打开日志文件;否则 nginx 会继续通过原有的文件描述符往已归档(复制并清空前)的旧日志文件写数据。原因在于:Linux 内核是根据文件描述符而不是文件名来定位文件的,如果不执行这一步,日志切割就会失败。
#!/bin/bash
# Rotate nginx logs: prune day-old archives, snapshot the live access/error
# logs with an hourly timestamp, truncate them in place, then send USR1 to
# the nginx master so it reopens its log files.
log_path="/home/gongyong/nginx/logs"
cutDate=$(date +%Y-%m-%d_%H)
nginx_cmd="/home/gongyong/nginx/bin/nginx"
shellScriptPath="/home/gongyong/crontab"

# Bail out before touching anything if the log directory is missing
# (the original only checked this AFTER the purge had already run).
if [ ! -d "${log_path}" ]; then
    echo "${log_path} not exist" > "${shellScriptPath}/nginx_log_cut.log.error"
    exit 1
fi

echo "${cutDate} start delete old log and cut current log" > "${shellScriptPath}/nginx_log_cut.log"

# Delete rotated access/error logs older than one day. Reading the list
# line-by-line (instead of word-splitting an unquoted command substitution)
# keeps filenames containing spaces intact.
find "${log_path}" -mtime +1 -type f -print | grep -E "access|error" |
while IFS= read -r toDeleteFile; do
    echo "${cutDate} ${toDeleteFile} will be deleted" >> "${shellScriptPath}/nginx_log_cut.log"
    rm -f -- "${toDeleteFile}"
done

# Copy-then-truncate rotation: snapshot each live log, then empty it in place.
cp "${log_path}/access.log" "${log_path}/access.log.${cutDate}" && : > "${log_path}/access.log"
cp "${log_path}/error.log" "${log_path}/error.log.${cutDate}" && : > "${log_path}/error.log"

# Signal the master to reopen its logs. pgrep replaces the fragile
# "ps axu | grep | grep -v grep | awk" pipeline; -o picks the oldest
# matching process, i.e. the master rather than a worker.
master_pid=$(pgrep -o -f "nginx: master process")
if [ -n "${master_pid}" ]; then
    kill -USR1 "${master_pid}"
fi
[root@weblogic logrotate.d]# pwd
/etc/logrotate.d
[root@weblogic logrotate.d]# cat nginx
# Logrotate policy for every file ending in "log" under /var/log/nginx.
/var/log/nginx/*log {
# Rotate once per day, keeping the 10 most recent archives.
daily
rotate 10
# Name archives with a date suffix (e.g. access.log-20160402) instead of -1, -2, ...
dateext
# Don't error out if a log file is missing; skip empty logs.
missingok
notifempty
# compress
# Compress archives one cycle late so the newest archive stays readable as plain text.
delaycompress
# Recreate the live log with these permissions/owner after renaming the original.
create 640 nginx adm
# Run postrotate once for the whole glob, not once per matched file.
sharedscripts
postrotate
# Send USR1 to the nginx master (pid file guarded) so it reopens its log
# files; otherwise it would keep writing to the renamed archive. The
# trailing "|| :" keeps a missing/stale pid from failing the rotation.
[ -f /var/run/nginx.pid ] && /bin/kill -USR1 $(cat /var/run/nginx.pid 2>/dev/null) 2>/dev/null || :
endscript
}
/var/log/nginx/为nginx日志的存储目录,可以根据实际情况进行修改。
daily:日志文件将按天轮循。
weekly:日志文件将按周轮循。
monthly:日志文件将按月轮循。
missingok:在日志轮循期间,任何错误将被忽略,例如“文件无法找到”之类的错误。
rotate 10:一次存储10个日志文件。对于第11个日志文件,时间最久的那个日志文件将被删除。
dateext:定义日志文件后缀是日期格式,也就是切割后文件是:xxx.log-20160402.gz这样的格式。如果该参数被注释掉,切割出来是按数字递增,即前面说的 xxx.log-1这种格式。
compress:在轮循任务完成后,已轮循的归档将使用gzip进行压缩。
delaycompress:总是与compress选项一起用,delaycompress选项指示logrotate不要将最近的归档压缩,压缩将在下一次轮循周期进行。这在你或任何软件仍然需要读取最新归档时很有用。
notifempty:如果是空文件的话,不进行转储。
create 640 nginx adm:以指定的权限和用户属性,创建全新的日志文件,同时logrotate也会重命名原始日志文件。
postrotate/endscript:在所有其它指令完成后,postrotate和endscript里面指定的命令将被执行。在这里,是向nginx主进程发送USR1信号,使其重新打开日志文件后继续运行。注意:这两个关键字必须单独成行。
[root@weblogic logrotate.d]# cat /etc/anacrontab
# /etc/anacrontab: configuration file for anacron
# See anacron(8) and anacrontab(5) for details.
SHELL=/bin/sh
PATH=/sbin:/bin:/usr/sbin:/usr/bin
MAILTO=root
# the maximal random delay added to the base delay of the jobs
RANDOM_DELAY=45
# the jobs will be started during the following hours only
START_HOURS_RANGE=3-22
#period in days delay in minutes job-identifier command
1	5	cron.daily		nice run-parts /etc/cron.daily
7	25	cron.weekly		nice run-parts /etc/cron.weekly
@monthly 45 cron.monthly nice run-parts /etc/cron.monthly
# the jobs will be started during the following hours only
START_HOURS_RANGE=3-22