@
- 一、ffmpeg腳本
- 二、java jar包啟動-剔除Pom中依賴
- 三、Java jar包通用啟動腳本
- 四、查看centos配置信息腳本
- 五、Jenkins項目打包發布腳本
- 六、Mysql-使用腳本進行分庫分表備份
- 七、實時監控網卡流量的通用腳本
- 八、監控磁盤的監控腳本
- 九、指定時間內網站訪問次數的監控
- 十、基於Ping和Telnet/NC的監控腳本案例分析
- 十一、監控某個目錄是否被更改
- 十二、日志文件ERROR監控報警
- 十三、網站訪問狀態和超時時間監控報警設置
- 十四、服務器磁盤監控腳本分享(含報警郵件)
- 十五、業務日志清理腳本
- 十六、Linux下間隔多少秒 (即以秒為單位) 去執行某條命令或某個shell腳本的操作方法
- 十七、Linux下批量ping某個網段ip的腳本
- 十八、查看系統運行情況
- 十九、管理docker
- 二十、shell多線程備份數據庫
一、ffmpeg腳本
1.1 打開進程,並判斷進程數量
原版本:
#!/bin/bash
# Start an ffmpeg RTSP relay for camera "$1$2$3" unless one is already running.
#   $1/$2/$3 select camera + substream, $4 is the push target URL.
# Fixes: no spaces are allowed around '=' in assignments; the grep patterns
# must be double-quoted so $1$2$3 actually expands; '[' needs spaces around
# its operands; and the kill loop must iterate PIDs, not the process count.
ffmpegPid=$(ps -ef | grep -E "ffmpeg.*$1$2$3" | grep -v 'grep' | awk '{print $2}')
if [ -z "$ffmpegPid" ]
then
    # no relay for this camera yet; guard against runaway ffmpeg processes
    threadNum=$(ps -ef | grep ffmpeg | grep -v 'grep' | wc -l | awk '{print $1}')
    if [ "$threadNum" -gt 10 ]
    then
        # more than 10 ffmpeg processes: kill them all (the original looped
        # over the count variable, so nothing was ever killed)
        echo "threadNum too more!"
        for thread in $(ps -ef | grep ffmpeg | grep -v 'grep' | awk '{print $2}')
        do
            kill -9 "${thread}"
        done
    else
        nohup ffmpeg -re -rtsp_transport tcp -i "rtsp://ip:port/dss/monitor/params?cameraid=$1%24$2&substream=$3" -vcodec libx264 -vprofile baseline -acodec aac -ar 44100 -strict -2 -ac 1 -f flv -s 1280x720 -q 10 $4 > /Users/chuxiaowei/ffmpeg$1$2$3.log 2>&1 &
        # report the PID of the relay we just started
        ps -ef | grep -E "ffmpeg.*$1$2$3" | grep -v 'grep' | awk '{print $2}'
    fi
else
    # already running: just report its PID
    ps -ef | grep -E "ffmpeg.*$1$2$3" | grep -v 'grep' | awk '{print $2}'
fi
改進之后:
#!/bin/bash
# Improved version: start the relay only when no matching ffmpeg process exists.
# Fixes: no spaces around '=' in the assignment, and the grep patterns are
# double-quoted so the positional parameters expand (single quotes left them
# as the literal text '$1$2$3').
ffmpegPid=$(ps -ef | grep -E "ffmpeg.*$1$2$3" | grep -v 'grep' | awk '{print $2}')
if [ -z "$ffmpegPid" ]
then
    nohup ffmpeg -re -rtsp_transport tcp -i "rtsp://ip:port/dss/monitor/params?cameraid=$1%24$2&substream=$3" -vcodec libx264 -vprofile baseline -acodec aac -ar 44100 -strict -2 -ac 1 -f flv -s 1280x720 -q 10 $4 > /root/ffmpeg$1$2$3.log 2>&1 &
    # print the PID of the relay now pushing to $4
    ps -ef | grep -E "ffmpeg.*$4" | grep -v 'grep' | awk '{print $2}'
fi
1.2 關閉進程
# Stop the ffmpeg process whose command line matches $1.
#!/bin/bash
# Fix: the pattern must be double-quoted — in single quotes $1 stayed literal,
# so the pipeline matched (and killed) every running ffmpeg process instead
# of the one requested by the caller.
ps -ef | grep -E "ffmpeg.*$1" | grep -v 'grep' | awk '{print $2}' | xargs kill
二、java jar包啟動-剔除Pom中依賴
#!/bin/bash
# Restart the rtsptortmp jar: kill any running instance, then launch a new
# one with its externalized dependencies on the loader path.
# Fix: grep -E '*rtsptortmp.*' is an invalid ERE (nothing precedes '*');
# a plain fixed pattern is sufficient. Also quote the emptiness test.
pid=$(ps -ef | grep java | grep 'rtsptortmp' | awk '{print $2}')
echo "pid = $pid"
if [ -n "$pid" ];then
    kill -9 $pid
    echo "kill the process rtsptortmp pid = $pid"
fi
# -Dloader.path points at the dependency dir stripped out of the pom
nohup java -Dloader.path=/root/rtsptortmplib -jar rtsptortmp-1.0-SNAPSHOT.jar --spring.profiles.active=prod > /root/logs/rtsptortmp.log 2>&1 &
tail -f /root/logs/rtsptortmp.log
三、Java jar包通用啟動腳本
eg:
./start.sh java.jar
#!/bin/bash
# Generic jar restart script. Usage: ./start.sh app.jar
# Fix: the original used grep -E '*$1*' — single quotes prevented $1 from
# expanding and the leading '*' is an invalid ERE, so the running instance
# was never found and never killed.
pid=$(ps -ef | grep java | grep "$1" | awk '{print $2}')
echo "pid = $pid"
if [ -n "$pid" ];then
    kill -9 $pid
    echo "kill the process pid = $pid"
fi
nohup java -jar -Xms256m -Xmx256m $1 --spring.profiles.active=prod > /root/logs/$1.log 2>&1 &
tail -f /root/logs/$1.log
四、查看centos配置信息腳本
#!/bin/bash
# Print a quick hardware/OS summary for a CentOS host:
# CPU sockets/cores/model/frequency, memory, kernel and disk layout.
echo ======= cpu個數: =======
grep 'physical id' /proc/cpuinfo | sort -u | wc -l
echo ======= cpu核數: =======
grep "cpu cores" /proc/cpuinfo | uniq
echo ======= cpu型號: =======
grep 'model name' /proc/cpuinfo | uniq
echo ======= cpu內核頻率: =======
grep MHz /proc/cpuinfo | uniq
echo ======= cpu統計信息: =======
lscpu
echo ======= 內存總數: =======
grep MemTotal /proc/meminfo
echo ======= 內核版本: =======
cat /proc/version
echo ======= 操作系統內核信息: =======
uname -a
echo ======= 磁盤信息: =======
fdisk -l
五、Jenkins項目打包發布腳本
#!/bin/bash
# Jenkins deploy script: replace the old war, restart the app, record the PID.
#   $1 - war file name, $2 - deploy directory, $3 - directory Jenkins uploads to
# BUG FIXED: the original used '//' for comments; shell treats those as
# commands to run — comments must start with '#'.
name=$1
path=$2
path_w=$3
# remove the previously deployed war if present
if [ -f "$path/$name" ];then
    echo "delete the file $name"
    rm -f $path/$name
else
    echo "the file $name is not exist"
fi
# copy the freshly uploaded war into the deploy directory
cp $path_w/$name $path/
echo "copy the file $name from $path_w to $path"
# find a running instance of this application
pid=$(ps -ef | grep java | grep $name | awk '{print $2}')
echo "pid = $pid"
# kill it if it is running
if [ -n "$pid" ];then
    kill -9 $pid
    echo "kill the process $name pid = $pid"
else
    echo "process is not exist"
fi
# run from the deploy directory so nohup.out lands there
cd $path
# keep Jenkins from killing the spawned process
BUILD_ID=dontKillMe
# start the application
nohup java -server -Xms256m -Xmx512m -jar -Dserver.port=20000 $name >> nohup.out 2>&1 &
# verify startup by checking that a PID now exists (the original tested $? of
# the assignment itself, which is effectively always 0)
pid_new=$(ps -ef | grep java | grep $name | awk '{print $2}')
if [ -n "$pid_new" ];then
    echo "this application $name is starting pid_new = $pid_new"
else
    echo "this application $name startup failure"
fi
echo $! > /var/run/myClass.pid
echo "over"
六、Mysql-使用腳本進行分庫分表備份
[root@ctos3 ~]# cat bak.sh
#!/bin/bash
# Per-table MySQL backup: one gzipped dump per table, grouped by database
# under /backup/<db>/<table>_<timestamp>.sql.gz.
db_user="root"
db_pass="guoke123"
run_sql="mysql -u$db_user -p$db_pass -e"
dump_cmd="mysqldump -u$db_user -p$db_pass -x -F"
backup_dir=/backup
# list user databases, skipping the system schemas
# (information_schema, mysql, performance_schema, sys)
db_list=$($run_sql "show databases;" | sed 1d | grep -Ev 'info|mysq|per|sys')
[ ! -d $backup_dir ] && mkdir $backup_dir
cd $backup_dir
for db in $db_list; do
    tab_list=$($run_sql "show tables from $db;" | sed 1d)
    for tab in $tab_list; do
        mkdir -p $backup_dir/$db
        $dump_cmd $db $tab --events | gzip > $backup_dir/${db}/${tab}_$(date +%F_%T).sql.gz
    done
done
七、實時監控網卡流量的通用腳本
[root@ceph-node1 ~]# cat /root/net_monit.sh
#!/bin/bash
# Real-time NIC traffic monitor: samples /proc/net/dev once per second and
# prints the RX/TX rate of the given interface until Ctrl+C.
PATH=/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin;
export PATH
function traffic_monitor {
  OS_NAME=$(sed -n '1p' /etc/issue)
  eth=$1
  # refuse to run for an interface that does not exist
  if [ ! -d /sys/class/net/$eth ];then
    echo -e "Network-Interface Not Found"
    echo -e "You system have network-interface:\n`ls /sys/class/net`"
    exit 5
  fi
  while [ "1" ]
  do
    STATUS="fine"
    # byte counters sampled one second apart (col 2 = RX bytes, col 10 = TX bytes)
    RXpre=$(cat /proc/net/dev | grep $eth | tr : " " | awk '{print $2}')
    TXpre=$(cat /proc/net/dev | grep $eth | tr : " " | awk '{print $10}')
    sleep 1
    RXnext=$(cat /proc/net/dev | grep $eth | tr : " " | awk '{print $2}')
    TXnext=$(cat /proc/net/dev | grep $eth | tr : " " | awk '{print $10}')
    clear
    RX=$((${RXnext}-${RXpre}))
    TX=$((${TXnext}-${TXpre}))
    # scale RX to B/s, KB/s or MB/s
    if [[ $RX -lt 1024 ]];then
      RX="${RX}B/s"
    elif [[ $RX -gt 1048576 ]];then
      RX=$(echo $RX | awk '{print $1/1048576 "MB/s"}')
      # BUG FIX: was '$STATUS="busy"', which expanded to the bogus command
      # 'fine="busy"' instead of assigning the variable
      STATUS="busy"
    else
      RX=$(echo $RX | awk '{print $1/1024 "KB/s"}')
    fi
    # scale TX the same way
    if [[ $TX -lt 1024 ]];then
      TX="${TX}B/s"
    elif [[ $TX -gt 1048576 ]];then
      TX=$(echo $TX | awk '{print $1/1048576 "MB/s"}')
    else
      TX=$(echo $TX | awk '{print $1/1024 "KB/s"}')
    fi
    echo -e "==================================="
    echo -e "Welcome to Traffic_Monitor stage"
    echo -e "version 1.0"
    echo -e "Since 2018.7.2"
    echo -e "Created by wangshibo"
    echo -e "BLOG: http://www.cnblogs.cn/kevingrace"
    echo -e "==================================="
    echo -e "System: $OS_NAME"
    echo -e "Date: `date +%F`"
    echo -e "Time: `date +%k:%M:%S`"
    echo -e "Port: $1"
    echo -e "Status: $STATUS"
    echo -e " \t RX \tTX"
    echo "------------------------------"
    echo -e "$eth \t $RX $TX "
    echo "------------------------------"
    echo -e "Press 'Ctrl+C' to exit"
  done
}
# an interface name is required as the first argument
if [[ -n "$1" ]];then
  traffic_monitor $1
else
  echo -e "None parameter,please add system netport after run the script! \nExample: 'sh traffic_monitor eth0'"
fi
[root@ceph-node1 ~]# chmod 755 /root/net_monit.sh
[root@ceph-node1 ~]# sh /root/net_monit.sh eth0 #eth0是網卡設備名稱,如果是網卡綁定bond0,后面就跟bond0
===================================
Welcome to Traffic_Monitor stage
version 1.0
Since 2018.7.2
Created by wangshibo
BLOG: http://www.cnblogs.cn/kevingrace
===================================
System: CentOS release 6.9 (Final)
Date: 2018-07-02
Time: 15:19:34
Port: eth0
Status: fine
RX TX
------------------------------
eth0 417B/s 390B/s
------------------------------
Press 'Ctrl+C' to exit
===================================
Welcome to Traffic_Monitor stage
version 1.0
Since 2018.7.2
Created by wangshibo
BLOG: http://www.cnblogs.cn/kevingrace
===================================
System: CentOS release 6.9 (Final)
Date: 2018-07-02
Time: 15:19:35
Port: eth0
Status: fine
RX TX
------------------------------
eth0 1.49902KB/s 1.3252KB/s
------------------------------
Press 'Ctrl+C' to exit
八、監控磁盤的監控腳本
[root@ceph-node1 ~]# cat disk_monit.sh
#!/bin/bash
#filename:Monitor_Disk
# Enumerate physical disks via fdisk, then print a per-partition summary
# (size, usage, fs type, partition id, block size, root-reserved space).
# Work files live under /mnt/Monitor_Disk and are removed at the end.
Monitor_Disk(){
mkdir -p /mnt/Monitor_Disk
# one line per disk: "/dev/sdX <total-size>"
fdisk -l|grep "Disk /dev/" | awk '{print $2,$3$4}'|tr -d ',:'>/mnt/Monitor_Disk/device_list.log
# N = disk counter; ECHO prints an N-char '#' banner (defined further down,
# but already in scope when Monitor_Disk is invoked at the bottom)
N=1;ECHO 90
while read device_line
do
Device=`echo $device_line|awk '{print $1}'`
Sum=`echo $device_line|awk '{print $2}'`
# mounted partitions belonging to this disk
df -h |grep "$Device"|sort>/mnt/Monitor_Disk/${N}_partitions.log
echo
echo "** 第$N塊硬盤($Device):${Sum} **"| grep -E "$Device|$Sum|$N" --color=yes
echo "------------------------------------"
# table header (two physical lines joined by '\')
echo -e "linux分區 掛載目錄 總大小 已用 剩余 已用百分比 文件系統 ID system \
塊大小 預留空間 ">/mnt/Monitor_Disk/${N}_Over.log
echo -e "========= ======== ===== === === ========== ======= == ====== \
====== ======== ">>/mnt/Monitor_Disk/${N}_Over.log
Num_Partition=`cat /mnt/Monitor_Disk/${N}_partitions.log|wc -l`
n=0
while read partition_line
do
# df -h fields: device, size, used, avail, use%, mountpoint
Partition_Name=`echo $partition_line|awk '{print $1}'`
Mount_Dir=`echo $partition_line|awk '{print $6}'`
Partition_Sum=`echo $partition_line|awk '{print $2}'`
Partition_Used=`echo $partition_line|awk '{print $3}'`
Partition_Leave=`echo $partition_line| awk '{print $4}'`
Partition_Percent=`echo $partition_line|awk '{print $5}'`
# fs type (+mount options), partition id and system name from mount/fdisk
Partition_Type=`mount|grep $Partition_Name|awk '{print $5$6}'`
Partition_Id=`fdisk -l | grep $Partition_Name|tr -d '\*'|awk '{print $5}'`
Partition_System=`fdisk -l | grep $Partition_Name|tr -d '\*'|awk '{print $6}'`
# block size and root-reserved blocks from tune2fs (ext2/3/4 filesystems)
Part_Block_Size_B=`tune2fs -l $Partition_Name|grep "Block size"|awk '{print $3}'`
Part_Lift_For_Root_Blocks=`tune2fs -l $Partition_Name|grep "Reserved block count:"|\
awk '{print $4}'`
Part_Block_Size=`echo $Part_Block_Size_B/1024|bc`
Part_Lift_For_Root=`echo ${Part_Lift_For_Root_Blocks}*${Part_Block_Size}/1024|bc`
echo -e "$Partition_Name $Mount_Dir $Partition_Sum $Partition_Used $Partition_Leave \
$Partition_Percent $Partition_Type $Partition_Id $Partition_System \
${Part_Block_Size}K ${Part_Lift_For_Root}M">>/mnt/Monitor_Disk/${N}_Over.log
let n++
# after the disk's last partition, pretty-print the accumulated table
[ $n -eq $Num_Partition ]&&(cat /mnt/Monitor_Disk/${N}_Over.log|column -t;echo)
done</mnt/Monitor_Disk/${N}_partitions.log
let N++
done</mnt/Monitor_Disk/device_list.log
ECHO 90
# clean up all work files
rm -fr /mnt/Monitor_Disk
}
# Print $1 '#' characters, then two newlines (section banner)
ECHO(){
for ((i=1;i<=$1;i++))
do
echo -n "#"
[ $i -eq $1 ]&&(echo;echo)
done
}
Monitor_Disk
[root@ceph-node1 ~]# chmod 755 disk_monit.sh
[root@ceph-node1 ~]# sh disk_monit.sh
##########################################################################################
** 第1塊硬盤(/dev/sdb):577.4GB **
------------------------------------
linux分區 掛載目錄 總大小 已用 剩余 已用百分比 文件系統 ID system 塊大小 預留空間
========= ======== ===== === === ========== ======= == ====== ====== ========
/dev/sdb1 /data 530G 42G 461G 9% ext4(rw) 83 Linux 4K 27532M
** 第2塊硬盤(/dev/sda):322.1GB **
------------------------------------
linux分區 掛載目錄 總大小 已用 剩余 已用百分比 文件系統 ID system 塊大小 預留空間
========= ======== ===== === === ========== ======= == ====== ====== ========
/dev/sda1 /boot 283M 76M 193M 29% ext4(rw) 83 Linux 1K 15M
/dev/sda3 / 265G 3.1G 248G 2% ext4(rw) 83 Linux 4K 13744M
##########################################################################################
8.1 監控磁盤IO使用率的腳本,實時查看IO使用情況,防止因為磁盤IO效率低而導致MySQL查詢慢的問題。
#!/bin/bash
# Monitor disk IO utilization (%util from iostat) and mail an alert when any
# device averages above 90% over five one-second samples.
# Recipient of the alert mail
mail="/usr/local/sbin/mail.py"
mail_user=admin@admin.com
# make sure iostat / iotop are available
if ! which iostat &>/dev/null
then
    yum install -y sysstat
fi
if ! which iotop &>/dev/null
then
    yum install -y iotop
fi
logdir=/data/iolog
mkdir -p $logdir   # the log directory may not exist on first run
dt=`date +%F`      # use the date as the log file name
# average %util over 5 one-second iostat samples for device $1
get_io()
{
    iostat -dx 1 5 > $logdir/iostat.log
    sum=0
    for ut in `grep "^$1" $logdir/iostat.log |awk '{print $NF}' |cut -d '.' -f 1`
    do
        sum=$[$sum+$ut]
    done
    echo $[$sum/5]
}
# BUG FIXES: 'while ture' -> 'while true'; the device-list command
# substitution was missing its closing backquote; the garbled 'CPU\}'
# pattern is now 'CPU', which excludes the iostat banner line.
while true
do
    for d in `iostat -dx |egrep -v '^$|Device:|CPU' |awk '{print $1}'`
    do
        io=`get_io $d`
        if [ $io -gt 90 ]
        then
            # append the evidence (iostat snapshot + top IO consumers) and alert
            date >> $logdir/$dt
            cat $logdir/iostat.log >> $logdir/$dt
            iotop -obn2 >> $logdir/$dt
            echo "###################" >> $logdir/$dt
            python $mail $mail_user "磁盤IO使用率超過90%" "`cat $logdir/$dt`" 2>/dev/null
        fi
    done
    sleep 10
done
九、指定時間內網站訪問次數的監控
需求說明:
在日常運維工作中,為了防止一些惡意訪問的行為,例如不斷的請求刷流量,通過實時過濾Nginx訪問日志,將單位時間內訪問次數達到指定閥值的來源ip查找出來,並通過郵件報警方式及時通知運維人員!
比如針對url為http://192.168.10.202:8888的訪問進行監控,當在1分鍾內訪問次數超過300次數,就郵件報警給運維人員。
9.1 nginx日志監控腳本
[root@Fastdfs_storage_s1 ~]# cat /opt/nginx_log_monit.sh
#!/bin/bash
# Alert when a single source ip makes more than 300 requests within the last
# minute, based on the nginx access log.
# log file to scan
logfile=/usr/local/nginx/logs/access.log
# look-back window in minutes. BUG FIX: the original referenced $last_minutes
# without ever defining it, leaving the 'date -d' expression malformed.
last_minutes=1
# start of the monitored window
start_time=`date -d"$last_minutes minutes ago" +"%H:%M:%S"`
# end of the monitored window
stop_time=`date +"%H:%M:%S"`
# keep entries whose time-of-day falls inside the window (tac scans newest
# first), then rank source ips by request count
tac $logfile | awk -v st="$start_time" -v et="$stop_time" '{t=substr($4,RSTART+14,21);if(t>=st && t<=et) {print $0}}' \
| awk '{print $1}' | sort | uniq -c | sort -nr > /root/log_ip_top10
ip_top=`cat /root/log_ip_top10 | head -1 | awk '{print $1}'`
# more than 300 hits from one ip inside the window -> mail the on-call
if [[ $ip_top -gt 300 ]];then
    /usr/bin/python /opt/send_mail.py &
fi
9.2 python報警腳本
[root@Fastdfs_storage_s1 ~]# cat /opt/send_mail.py
# -*- coding: utf-8 -*-
# Alert mailer: reads the top line of /root/log_ip_top10 (written by
# nginx_log_monit.sh as "<count> <ip>") and mails a warning that a single
# source ip exceeded the per-minute request threshold.
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from datetime import datetime
import os
import smtplib

def _format_addr(s):
    # Render "Display Name <addr>" with an RFC 2047 encoded display name.
    name, addr = parseaddr(s)
    return formataddr((Header(name, 'utf-8').encode(), addr))

# Mailbox settings
smtp_server = 'smtp.kevin.com'
smtp_port = 465
from_addr = 'monit@kevin.com'
# NOTE(review): this looks up an environment variable literally named
# 'monit@123', which almost certainly does not exist (so login() would get
# None). Presumably the password itself, or a proper env-var name, was
# intended — confirm before relying on this script.
password = os.environ.get('monit@123')
to_addr = ['wangshibo@kevin.com']
# Build the message
msg = MIMEMultipart()
msg['From'] = _format_addr('發件人 <%s>' % from_addr)
msg['To'] = _format_addr('收件人 <%s>' % to_addr)
msg['Subject'] = Header('Warning:單ip請求次數異常', 'utf-8').encode()
# Read the report line to include in the mail: "<count> <ip>"
with open('/root/log_ip_top10', 'r') as f:
    line = f.readline().strip()
    line = line.split(" ")
    print(line)
# Mail body as HTML: line[1] is the ip, line[0] the request count
html = '<html><body><h2>一分鍾內單ip請求次數超過閥值</h2>' + \
    '<p>ip:%s 請求次數/min:%s</p>' % (line[1],line[0]) + \
    '</body></html>'
msg.attach(MIMEText(html, 'html', 'utf-8'))
server = smtplib.SMTP_SSL(smtp_server, smtp_port)
server.login(from_addr, password)
server.sendmail(from_addr, to_addr, msg.as_string())
server.quit()
9.3 寫個測試腳本不停curl請求資源觸發報警
[root@Fastdfs_storage_s1 ~]# cat /opt/curl.sh
#!/bin/bash
# Fire $2 requests at url $1 and print the HTTP status code of each.
# example: curl.sh http://www.kevin.com 100
usage()
{
    echo "usage: `basename $0` url count"
}
# exactly two arguments are required
if [ $# -ne 2 ]; then
    usage
    exit 1
fi
for ((n = 1; n <= $2; n++)); do
    status=$(curl -o /dev/null -s -w %{http_code} $1)
    echo $1 $status
done
手動執行測試腳本
[root@Fastdfs_storage_s1 ~]# /bin/bash /opt/curl.sh http://192.168.10.202:8888 300
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
http://192.168.10.202:8888 200
...........
9.4 定時任務,由於上面腳本是監控一分鍾內的日志,因此每分鍾執行一次
[root@Fastdfs_storage_s1 ~]# crontab -e
* * * * * /bin/bash -x /opt/nginx_log_monit.sh >/dev/null 2>&1
這里僅僅是實現了郵件告警功能,實際上還可以實現自動屏蔽惡意訪問的ip。
可以通過Nginx deny來實現,也可以通過iptables屏蔽("iptables -I INPUT -s x.x.x.x -j DROP"方式)。
十、基於Ping和Telnet/NC的監控腳本案例分析
10.1 通過shell腳本,判斷172.16.60.0/24網絡里,當前在線的ip有哪些?能ping通則認為在線。
[root@python2 ~]# cat /tmp/ip.sh
#!/bin/bash
# Sweep 172.16.60.1-254 and report which hosts answer ping.
for octet in {1..254}
do
    host=172.16.60.${octet}
    # two probes; the host is considered online when ping exits 0
    if ping -c2 ${host} >/dev/null 2>&1; then
        echo "${host} is online!"
    else
        echo "${host} is failed"
    fi
done
執行腳本:
[root@python2 ~]# sh /tmp/ip.sh
172.16.60.1 is online!
172.16.60.2 is failed
172.16.60.3 is failed
172.16.60.4 is failed
......
案例一:單純地對某些ip進行ping監控
[root@test opt]# cat /opt/hosts_ip_list
192.168.10.10
192.168.10.11
192.168.10.12
192.168.10.13
192.168.10.14
192.168.10.15
192.168.10.16
192.168.10.17
[root@test opt]# cat /opt/hosts_ip_monit.sh
#!/bin/bash
# For each host in /opt/hosts_ip_list: ping three times, 2s apart. When all
# three probes fail, report the host and comment its /etc/hosts entry out;
# when it answers, re-enable the entry.
for ip in $(cat /opt/hosts_ip_list)
do
ping -c 1 $ip &>/dev/null #probe 1 of 3; the ip is declared unreachable only when all three pings fail
a=$?
sleep 2
ping -c 1 $ip &>/dev/null
b=$?
sleep 2
ping -c 1 $ip &>/dev/null
c=$?
sleep 2
DATE=$(date +%F" "%H:%M)
# down: all three probes failed -> comment the ip out of /etc/hosts
if [ $a -ne 0 -a $b -ne 0 -a $c -ne 0 ];then
echo -e "Date : $DATE\nHost : $ip\nProblem : Ping is failed."
/bin/sed -i 's/^'$ip'/'#$ip'/g' /etc/hosts
else
# reachable -> make sure the entry is uncommented again
echo "$ip ping is successful."
/bin/sed -i 's/^'#$ip'/'$ip'/g' /etc/hosts
fi
done
[root@test opt]# chmod 755 /opt/hosts_ip_monit.sh
[root@test opt]# sh /opt/hosts_ip_monit.sh
Date : 2018-04-24 15:49
Host : 192.168.10.10
Problem : Ping is failed.
Date : 2018-04-24 15:50
Host : 192.168.10.11
Problem : Ping is failed.
192.168.10.12 ping is successful.
192.168.10.13 ping is successful.
192.168.10.14 ping is successful.
192.168.10.15 ping is successful.
192.168.10.16 ping is successful.
Date : 2018-04-24 15:51
Host : 192.168.10.17
Problem : Ping is failed.
案例二:對/etc/hosts列表里的ip映射關系進行ping監控報警
測試系統服務器需要訪問域名www.test.com,該域名解析的DNS地址有很多個,需要在測試系統服務器上的做host綁定。在/etc/hosts文件了做了www.test.com域名的很多綁定,
在域名解析時,會從host綁定配置里從上到下匹配,如果上面綁定的ip不通,則域名解析就會失敗,不會主動去解析到下一個綁定的地址,除非將這個不通的ip綁定注釋掉或刪除掉。
現在要求:
當/etc/hosts文件里綁定的ip出現故障,ping不通的時候,將該ip的綁定自動注釋,並發出郵件報警;如果該ip恢復了正常通信,將自動打開該ip的綁定設置。
[root@cx-app01 ~]# cat /etc/hosts
#192.168.10.10 www.test.com
#192.168.10.11 www.test.com
192.168.10.12 www.test.com
192.168.10.13 www.test.com
192.168.10.14 www.test.com
192.168.10.15 www.test.com
192.168.10.16 www.test.com
#192.168.10.17 www.test.com
[root@cx-app01 ~]# ping www.test.com
PING www.test.com (192.168.10.12) 56(84) bytes of data.
64 bytes from www.test.com (192.168.10.12): icmp_seq=1 ttl=50 time=31.1 ms
64 bytes from www.test.com (192.168.10.12): icmp_seq=2 ttl=50 time=30.7 ms
64 bytes from www.test.com (192.168.10.12): icmp_seq=3 ttl=50 time=30.8 ms
.......
[root@cx-app01 ~]# cat /opt/hosts_ip_list
192.168.10.10
192.168.10.11
192.168.10.12
192.168.10.13
192.168.10.14
192.168.10.15
192.168.10.16
192.168.10.17
[root@cx-app01 ~]# cat /opt/hosts_ip_monit.sh
#!/bin/bash
# Ping every ip in /opt/hosts_ip_list three times (2s apart). When all three
# probes fail, comment the ip's /etc/hosts entry out and mail an alert (only
# on the first failure); when the ip answers again, restore the entry and
# mail a recovery notice (only on the first recovery).
for ip in $(cat /opt/hosts_ip_list)
do
ping -c 1 $ip &>/dev/null
a=$?
sleep 2
ping -c 1 $ip &>/dev/null
b=$?
sleep 2
ping -c 1 $ip &>/dev/null
c=$?
sleep 2
DATE=$(date +%F" "%H:%M)
# host is considered down only when all three probes failed
if [ $a -ne 0 -a $b -ne 0 -a $c -ne 0 ];then
echo -e "Date : $DATE\nHost : $ip\nProblem : Ping is failed."
# d != 0 means the entry is not yet commented out -> first failure: alert
cat /etc/hosts|grep "^#$ip"
d=$?
if [ $d -ne 0 ];then
/bin/bash /opt/sendemail.sh zhangsan@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接失敗,現已在/etc/hosts文件里注釋掉該ip的映射關系"
/bin/bash /opt/sendemail.sh lisi@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接失敗,現已在/etc/hosts文件里注釋掉該ip的映射關系"
/bin/bash /opt/sendemail.sh liuwu@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接失敗,現已在/etc/hosts文件里注釋掉該ip的映射關系"
/bin/sed -i 's/^'$ip'/'#$ip'/g' /etc/hosts
else
echo "$ip is not conneted,and it has been done"
fi
else
echo "$ip ping is successful."
# f == 0 means the entry is still commented out -> first recovery: restore
cat /etc/hosts|grep "^#$ip"
f=$?
if [ $f -eq 0 ];then
/bin/bash /opt/sendemail.sh zhangsan@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接成功,現已在/etc/hosts文件里恢復該ip的映射關系"
/bin/bash /opt/sendemail.sh lisi@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接成功,現已在/etc/hosts文件里恢復該ip的映射關系"
/bin/bash /opt/sendemail.sh liuwu@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip連接成功,現已在/etc/hosts文件里恢復該ip的映射關系"
/bin/sed -i 's/^'#$ip'/'$ip'/g' /etc/hosts
else
echo "$ip connection has been restored"
fi
fi
done
采用sendemail進行郵件告警發送,sendemail部署參考:http://www.cnblogs.com/kevingrace/p/5961861.html
[root@cx-app01 ~]# cat /opt/sendemail.sh
#!/bin/bash
# Filename: SendEmail.sh
# Notes: sends an alert mail via the sendEmail tool
#   $1 - recipient, $2 - subject (UTF-8), $3 - body (UTF-8)
#
# Log file for this run; stdout/stderr are redirected into it below
LOGFILE="/tmp/Email.log"
:>"$LOGFILE"
exec 1>"$LOGFILE"
exec 2>&1
SMTP_server='smtp.test.com'
username='monit@test.com'
password='monit@123'
from_email_address='monit@test.com'
to_email_address="$1"
message_subject_utf8="$2"
message_body_utf8="$3"
# Convert the subject to GB2312 so Chinese subjects are not garbled in clients
message_subject_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_subject_utf8
EOF`
# On conversion failure fall back to the original UTF-8 text
[ $? -eq 0 ] && message_subject="$message_subject_gb2312" || message_subject="$message_subject_utf8"
# Convert the body to GB2312 for the same reason
message_body_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_body_utf8
EOF`
[ $? -eq 0 ] && message_body="$message_body_gb2312" || message_body="$message_body_utf8"
# Send the mail (set -x traces the exact command into $LOGFILE)
sendEmail='/usr/local/bin/sendEmail'
set -x
$sendEmail -s "$SMTP_server" -xu "$username" -xp "$password" -f "$from_email_address" -t "$to_email_address" -u "$message_subject" -m "$message_body" -o message-content-type=text -o message-charset=gb2312
每10分鍾定時執行該監控腳本
[root@cx-app01 ~]# crontab -l
*/10 * * * * /bin/bash -x /opt/hosts_ip_monit.sh > /dev/null 2>&1
10.3 案例三:通過nc工具對/etc/hosts列表里的ip的443端口跟本機通信是否正常進行探測
案例二是針對ping編寫的監控腳本,下面介紹下利用nc探測端口通信是否正常的腳本:
探測本機對下面/etc/hosts文件里的ip地址的443端口通信是否正常,如果通信失敗,則發出報警,並在/etc/hosts文件里注釋掉該ip地址的綁定關系。
如果注釋掉的ip的443端口跟本機恢復了通信,則去掉/etc/hosts文件里該ip的注釋!
[root@cx-app01 ~]# cat /etc/hosts
192.168.10.201 www.test.com
192.168.10.205 www.test.com
192.168.10.17 www.test.com
192.168.10.85 www.test.com
192.168.10.176 www.test.com
192.168.10.245 www.test.com
192.168.10.25 www.test.com
192.168.10.47 www.test.com
[root@cx-app01 ~]# cat /opt/hosts_ip_list
192.168.10.201
192.168.10.205
192.168.10.17
192.168.10.85
192.168.10.176
192.168.10.245
192.168.10.25
192.168.10.47
采用nc工具去探測端口是否正常通信(yum install -y nc)
[root@cx-app01 ~]# /usr/bin/nc -z -w 10 192.168.10.201 443
Connection to 192.168.10.201 443 port [tcp/https] succeeded!
針對上面ip列表里的地址,進行批量ip的443端口通信的探測。腳本如下:
[root@cx-app01 ~]# cat /opt/host_ip_nc_monit.sh
#!/bin/bash
# Probe TCP/443 reachability of every host in /opt/hosts_ip_list with nc.
# Unreachable hosts get their /etc/hosts entry commented out (plus an alert
# mail); hosts that recover get the entry re-enabled (plus a recovery mail).
for ip in $(cat /opt/hosts_ip_list)
do
    # BUG FIX: DATE was never assigned, so the report printed an empty date
    DATE=$(date +%F" "%H:%M)
    echo -e "Date : $DATE\nHost : $ip\nProblem : Port 443 is connected."
    # a == 0 means the entry is currently commented out in /etc/hosts
    cat /etc/hosts|grep "^#$ip"
    a=$?
    if [ $a -ne 0 ];then
        # entry active: probe the port; on failure, alert and comment it out
        /usr/bin/nc -z -w 10 $ip 443
        b=$?
        if [ $b -ne 0 ];then
            /bin/bash /opt/sendemail.sh wangshibo@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip的443端口連接失敗,現已在/etc/hosts文件里注釋掉該ip的映射關系"
            /bin/bash /opt/sendemail.sh linan@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip的443端口連接失敗,現已在/etc/hosts文件里注釋掉該ip的映射關系"
            /bin/sed -i 's/^'$ip'/'#$ip'/g' /etc/hosts
        else
            echo "$HOSTNAME跟$ip的443端口正常連接"
        fi
    else
        # entry commented out: if the port answers again, restore the mapping
        /usr/bin/nc -z -w 10 $ip 443
        c=$?
        if [ $c -eq 0 ];then
            /bin/bash /opt/sendemail.sh wangshibo@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip的443端口連接成功,現已在/etc/hosts文件里恢復該ip的映射關系"
            /bin/bash /opt/sendemail.sh linan@test.com "測試系統跟www.test.com通信情況" "$HOSTNAME跟$ip的443端口連接成功,現已在/etc/hosts文件里恢復該ip的映射關系"
            /bin/sed -i 's/^'#$ip'/'$ip'/g' /etc/hosts
        else
            echo "$HOSTNAME跟$ip的443端口連接失敗"
        fi
    fi
done
給腳本賦權
[root@cx-app01 ~]# chmod 755 /opt/host_ip_nc_monit.sh
執行腳本:
[root@cx-app01 ~]# sh /opt/host_ip_nc_monit.sh
Date :
Host : 192.168.10.201
Problem : Port 443 is connected.
Connection to 192.168.10.201 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.201的443端口正常連接
Date :
Host : 192.168.10.205
Problem : Port 443 is connected.
Connection to 192.168.10.205 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.205的443端口正常連接
Date :
Host : 192.168.10.17
Problem : Port 443 is connected.
Connection to 192.168.10.17 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.17的443端口正常連接
Date :
Host : 192.168.10.85
Problem : Port 443 is connected.
Connection to 192.168.10.85 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.85的443端口正常連接
Date :
Host : 192.168.10.176
Problem : Port 443 is connected.
Connection to 192.168.10.176 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.176的443端口正常連接
Date :
Host : 192.168.10.245
Problem : Port 443 is connected.
Connection to 192.168.10.245 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.245的443端口正常連接
Date :
Host : 192.168.10.25
Problem : Port 443 is connected.
Connection to 192.168.10.25 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.25的443端口正常連接
Date :
Host : 192.168.10.47
Problem : Port 443 is connected.
Connection to 192.168.10.47 443 port [tcp/https] succeeded!
cx-app01.veredholdings.cn跟192.168.10.47的443端口正常連接
結合crontab進行計划任務
[root@cx-app01 ~]# crontab -l
*/10 * * * * /bin/bash -x /opt/host_ip_nc_monit.sh > /dev/null 2>&1
十一、監控某個目錄是否被更改
需求:對一個目錄(比如/data/test)進行監控,當這個目錄下文件或子目錄出現變動(如修改、創建、刪除、更名等操作)時,就發送郵件!
針對上面的需求,編寫shell腳本如下:
[root@centos6-vm01 opt]# vim file_monit.sh
#!/bin/bash
#Detects changes (create/modify/delete/rename) under the monitored
#directories by comparing md5 snapshots, and mails an alert on any change.
#Meant to run from cron (e.g. every 5 minutes); could also be looped.
#Ver:1.0
#http://www.cnblogs.com/kevingrace
#Directory holding the md5 snapshot logs
FileDir='/var/CheckFile'
#Hostname used in the alert mail (or define your own)
HostName=$(hostname)
#Mail parameters: smtp host, account, password, recipient, subject, body
Mail_Smtp="smtp.wangshibo.com"
Mail_User="notice@wangshibo.com"
Mail_Pass="notice@123"
Mail_From="notice@wangshibo.com"
Mail_To="wangshibo@wangshibo.com"
Mail_Subject="${HostName}:There are changes to system files"
Mail_Conntent="${HostName}:There are changes to system files"
#Directories to verify; /data/test is monitored here
CheckDir=(
/data/test
)
#Record the md5 of every monitored file into the baseline log (old.log)
OldFile () {
for i in ${CheckDir[@]}
do
/bin/find ${i} -type f |xargs md5sum >> ${FileDir}/old.log
done
}
#Record the md5 of every monitored file into the fresh log (new.log)
NewFile () {
for i in ${CheckDir[@]}
do
/bin/find ${i} -type f |xargs md5sum >> ${FileDir}/new.log
done
}
#Send the alert mail (the trailing \n forces a line break in the body)
SendEMail () {
/usr/local/bin/sendEmail -f $Mail_From -t $Mail_To -s $Mail_Smtp -u $Mail_Subject -xu $Mail_User -xp $Mail_Pass -m "$Mail_Conntent"\n
}
#Create the snapshot directory if it does not exist
if [ ! -d ${FileDir} ]
then
mkdir ${FileDir}
fi
#Create the baseline log on the first run
if [ ! -f ${FileDir}/old.log ]
then
OldFile
fi
#Install sendEmail if it is missing
if [ ! -f /usr/local/bin/sendEmail ]
then
cd /usr/local/src/
wget http://caspian.dotconf.net/menu/Software/SendEmail/sendEmail-v1.56.tar.gz
tar -xf sendEmail-v1.56.tar.gz
cd sendEmail-v1.56
cp sendEmail /usr/local/bin
chmod 0755 /usr/local/bin/sendEmail
fi
#Generate the fresh verification log
NewFile
#Compare the fresh log against the baseline
/usr/bin/diff ${FileDir}/new.log ${FileDir}/old.log >${FileDir}/diff.log
Status=$?
#Any difference -> mail the changed paths ('<' lines come from new.log)
if [ ${Status} -ne 0 ]
then
Mail_Conntent="$(grep '<' ${FileDir}/diff.log |awk '{print $3}')"
SendEMail
fi
#Reset the logs and archive the diff result with a timestamp
/bin/mv -f ${FileDir}/diff.log ${FileDir}/diff$(date +%F__%T).log
cat /dev/null > ${FileDir}/old.log
cat /dev/null > ${FileDir}/new.log
#Regenerate the baseline log
OldFile
#Delete archived diff results older than 30 days
/bin/find ${FileDir} -type f -mtime +30 |xargs rm -f
確保本機能連上shell腳本中指定的smtp服務器的25號端口
[root@centos6-vm01 opt]# telnet smtp.wangshibo.com 25
Trying 223.252.214.65...
Connected to smtp.wangshibo.com.
Escape character is '^]'.
220 icoremail.net Anti-spam GT for Coremail System (icoremail-gateway-smtp[20170531])
下面開始測試
[root@centos6-vm01 test]# cd /opt/
[root@centos6-vm01 opt]# cd /data/test/
[root@centos6-vm01 test]# ll
total 0
[root@centos6-vm01 test]# mkdir haha
[root@centos6-vm01 test]# echo "123456" > haha/heihei
[root@centos6-vm01 test]# ll
total 4
drwxr-xr-x. 2 root root 4096 Jan 10 01:42 haha
[root@centos6-vm01 test]# echo "abcdef" > test.txt
[root@centos6-vm01 test]# ll
total 8
drwxr-xr-x. 2 root root 4096 Jan 10 01:42 haha
-rw-r--r--. 1 root root 7 Jan 10 01:42 test.txt
執行監控腳本
[root@centos6-vm01 test]# sh -x /opt/file_monit.sh
注意:當首次執行腳本的時候,由於所監控的目錄下的文件沒有變動,所以不會發送郵件!
查看對比后的日志
[root@centos6-vm01 test]# ll -d /var/CheckFile/
drwxr-xr-x. 2 root root 4096 Jan 10 01:44 /var/CheckFile/
[root@centos6-vm01 test]# ll /var/CheckFile/
total 4
-rw-r--r--. 1 root root 0 Jan 10 01:44 diff2018-01-10__01:44:30.log
-rw-r--r--. 1 root root 0 Jan 10 01:44 new.log
-rw-r--r--. 1 root root 166 Jan 10 01:44 old.log
[root@centos6-vm01 test]# cat /var/CheckFile/diff2018-01-10__01\:44\:30.log
[root@centos6-vm01 test]# cat /var/CheckFile/new.log
[root@centos6-vm01 test]# cat /var/CheckFile/old.log
237267ea7fefa88360c22ab6fd582d7e /data/test/.hhhh.swp
5ab557c937e38f15291c04b7e99544ad /data/test/test.txt
f447b20a7fcbf53a5d5be013ea0b15af /data/test/haha/heihei
==============================================================================================================================
現在開始對/data/test目錄下的文件做下變動
[root@centos6-vm01 test]# echo "aaaaaa" >> test.txt
[root@centos6-vm01 test]# touch haha/bobo
[root@centos6-vm01 test]# mkdir heihei
[root@centos6-vm01 test]# ll
total 12
drwxr-xr-x. 2 root root 4096 Jan 10 01:47 haha
drwxr-xr-x. 2 root root 4096 Jan 10 01:47 heihei
-rw-r--r--. 1 root root 14 Jan 10 01:47 test.txt
執行監控腳本
[root@centos6-vm01 test]# sh -x /opt/file_monit.sh
查看對比后的日志
[root@centos6-vm01 test]# ll /var/CheckFile/
total 8
-rw-r--r--. 1 root root 0 Jan 10 01:44 diff2018-01-10__01:44:30.log
-rw-r--r--. 1 root root 179 Jan 10 01:47 diff2018-01-10__01:47:41.log
-rw-r--r--. 1 root root 0 Jan 10 01:47 new.log
-rw-r--r--. 1 root root 221 Jan 10 01:47 old.log
[root@centos6-vm01 test]# cat /var/CheckFile/diff2018-01-10__01\:47\:41.log
2,3c2
< 4533551682ca49b2f9b1f2829bf3b29d /data/test/test.txt
< d41d8cd98f00b204e9800998ecf8427e /data/test/haha/bobo
---
> 5ab557c937e38f15291c04b7e99544ad /data/test/test.txt
[root@centos6-vm01 test]# cat /var/CheckFile/old.log
237267ea7fefa88360c22ab6fd582d7e /data/test/.hhhh.swp
4533551682ca49b2f9b1f2829bf3b29d /data/test/test.txt
d41d8cd98f00b204e9800998ecf8427e /data/test/haha/bobo
f447b20a7fcbf53a5d5be013ea0b15af /data/test/haha/heihei
通過上面的diff日志,可以看到新變動的文件或子目錄已經記錄到日志里了。
查看郵件,就能看到/data/test目錄下變動的文件或子目錄信息了
通過crontab定時任務,每5分鍾執行一次檢查:
[root@centos6-vm01 test]# crontab -e
*/5 * * * * /bin/bash -x /opt/file_monit.sh > /dev/null 2>&1
以上腳本也可以用於檢測linux系統重要文件是否被更改,只需將檢查的目錄由腳本中的/data/test改為/etc即可!
十二、日志文件ERROR監控報警
即對日志文件中的error進行監控,當日志文件中出現error關鍵字時,即可報警!(grep -i error 不區分大小寫進行搜索"error"關鍵字,但是會將包含error大小寫字符的單詞搜索出來)
1)第一類日志
在每天的日志目錄下生產的error日志,此日志文件每天都會自動生成,里面有沒有error日志內容不一定,日志內容寫入不頻繁,日志文件比較小。
舉例說明:
[root@fk-databus01 ~]# ll /data/log/sedsb/20180628/DEJ_0001_error_20180628.0.log
-rw-rw-r-- 1 zquser zquser 63059 Jun 28 15:32 /data/log/sedsb/20180628/DEJ_0001_error_20180628.0.log
采用sendemail發送告警郵件,sendemail安裝參考:http://www.cnblogs.com/kevingrace/p/5961861.html
監控腳本路徑:
[root@fk-databus01 ~]# cd /opt/log_error_script/
[root@fk-databus01 log_error_script]# ll
total 20
-rw-r--r-- 1 root root 3782 Jun 29 12:13 DEJ_0001_error.log
-rwxr-xr-x 1 root root 4274 Jun 29 11:38 prcc_log_error.sh
-rwxr-xr-x 1 root root 1142 Feb 13 10:51 sendemail.sh
監控腳本內容
[root@fk-databus01 log_error_script]# cat prcc_log_error.sh
#!/bin/sh
# Watch today's DEJ_0001 error log for new ERROR entries and mail an alert
# via sendemail.sh. A copy of the last-seen log ($ERROR_NEW_LOG) is kept so
# the same errors are not reported twice.
ERROR_LOG=`/bin/ls /data/log/sedsb/$(date +%Y%m%d)/DEJ_0001_error*`
ERROR_NEW_LOG=/opt/log_error_script/DEJ_0001_error.log
DATE=`date +%Y年%m月%d日%H時%M分%S秒`
HOST=`/bin/hostname`
IP=`/sbin/ifconfig|grep "inet addr"|grep "Bcast"|cut -d":" -f2|awk -F" " '{print $1}'`
# last ERROR line's timestamp prefix plus the following 20 lines (stack trace)
ERROR_MESSAGE=$(/bin/grep -A20 "$(grep "ERROR" $ERROR_LOG|tail -1|awk '{print $1,$2,$3,$4}')" $ERROR_LOG)
DIR=/data/log/sedsb/$(date +%Y%m%d)
FILE=/data/log/sedsb/$(date +%Y%m%d)/DEJ_0001_error_$(date +%Y%m%d).0.log
# make sure today's log directory and file exist so later commands cannot fail
if [ ! -d $DIR ];then
/bin/mkdir $DIR
fi
if [ ! -f $FILE ];then
/bin/touch $FILE
fi
/bin/chown -R zquser.zquser $DIR
sleep 3
# state file holding the previously processed log content
if [ ! -f $ERROR_NEW_LOG ];then
/bin/touch $ERROR_NEW_LOG
fi
NUM1=$(/bin/cat $ERROR_LOG|wc -l)
NUM2=$(/bin/cat $ERROR_NEW_LOG|wc -l)
# first non-empty log of the day (state file still empty): alert immediately
if [ -f $ERROR_LOG ] && [ $NUM1 -ne 0 ] && [ $NUM2 -eq 0 ];then
/bin/bash /opt/log_error_script/sendemail.sh wangshibo@kevin.com "風控系統${HOSTNAME}機器prcc服務日志的error監控" "告警主機:${HOSTNAME} \n告警IP:${IP} \n告警時間:${DATE} \n告警等級:嚴重,抓緊解決啊! \n告警人員:王士博 \n告警詳情:prcc服務日志中出現error了! \n告警日志文件:${ERROR_LOG} \n當前狀態: PROBLEM \n \nerror信息:\n$ERROR_MESSAGE"
/bin/cat $ERROR_LOG > $ERROR_NEW_LOG
fi
# otherwise alert whenever the log differs from the last processed copy
/usr/bin/cmp $ERROR_LOG $ERROR_NEW_LOG >/dev/null 2>&1
if [ $? -ne 0 ];then
/bin/bash /opt/log_error_script/sendemail.sh wangshibo@kevin.com "風控系統${HOSTNAME}機器prcc服務日志的error監控" "告警主機:${HOSTNAME} \n告警IP:${IP} \n告警時間:${DATE} \n告警等級:嚴重,抓緊解決啊! \n告警人員:王士博 \n告警詳情:prcc服務日志中出現error了! \n告警日志文件:${ERROR_LOG} \n當前狀態: PROBLEM \n \nerror信息:\n$ERROR_MESSAGE"
/bin/cat $ERROR_LOG > $ERROR_NEW_LOG
fi
結合crontab進行定時監控(每15秒執行一次)
[root@fk-databus01 ~]# crontab -l
#監控pcrr日志的error
* * * * * /bin/bash -x /opt/log_error_script/prcc_log_error.sh >/dev/null 2>&1
* * * * * sleep 15;/bin/bash -x /opt/log_error_script/prcc_log_error.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/prcc_log_error.sh >/dev/null 2>&1
* * * * * sleep 45;/bin/bash -x /opt/log_error_script/prcc_log_error.sh >/dev/null 2>&1
==================================================================================
針對上面腳本中的某些變量說明
[root@fk-databus01 ~]# /bin/ls /data/log/sedsb/$(date +%Y%m%d)/DEJ_0001_error*
/data/log/sedsb/20180629/DEJ_0001_error_20180629.0.log
[root@fk-databus01 ~]# grep "ERROR" /data/log/sedsb/20180629/DEJ_0001_error_20180629.0.log
ERROR DEJ 2018-06-29 12:13:29.191 [pool-4-thread-10] n.s.p.r.thread.OuterCheThdInterface - cx201806291213288440016車300接口異常!
ERROR DEJ 2018-06-29 12:13:29.196 [nioEventLoopGroup-3-12] n.s.p.r.c.MessageControllerImpl - cx201806291213288440016:
[root@fk-databus01 ~]# grep "ERROR" /data/log/sedsb/20180629/DEJ_0001_error_20180629.0.log |tail -1|awk '{print $1,$2,$3,$4}'
ERROR DEJ 2018-06-29 12:13:29.196
[root@fk-databus01 ~]# /bin/grep -A20 "$(grep "ERROR" /data/log/sedsb/20180629/DEJ_0001_error_20180629.0.log |tail -1|awk '{print $1,$2,$3,$4}')" /data/log/sedsb/20180629/DEJ_0001_error_20180629.0.log
ERROR DEJ 2018-06-29 12:13:29.196 [nioEventLoopGroup-3-12] n.s.p.r.c.MessageControllerImpl - cx201806291213288440016:
net.sinocredit.pre.rcc.utils.exception.OuterDataException: 外部數據:cheFixPrice:mile里程 is null;
at net.sinocredit.pre.rcc.datafactory.OuterDataProcess.execute(OuterDataProcess.java:51)
at net.sinocredit.pre.rcc.datafactory.OuterDataProcess.execute(OuterDataProcess.java:23)
at net.sinocredit.pre.rcc.service.getOtherDataService.MessageServiceImpl.getOrderData(MessageServiceImpl.java:34)
at net.sinocredit.pre.rcc.controller.MessageControllerImpl.divMessage(MessageControllerImpl.java:110)
at net.sinocredit.pre.rcc.handler.ServerHandler.channelRead(ServerHandler.java:28)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:351)
at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:351)
at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:293)
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:267)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:359)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:351)
at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:373)
報警郵件效果如下:
**
2)第二類日志
此日志是固定文件,日志內容寫入頻繁,日志文件比較大。對於此文件的監控,通過采用tail -1000方式獲取日志文件的最新1000行的方式進行error監控!
舉例說明:
[root@fk-zqjcweb01 ~]# ll /data/log/decision/decision.log
-rw-rw-r-- 1 zquser zquser 5108 Jun 28 16:02 /data/log/decision/decision.log
采用sendemail發送告警郵件,sendemail安裝參考:http://10.0.8.50/software/sendemail_install.sh
監控腳本路徑:
[root@fk-zqjcweb01 ~]# cd /opt/log_error_script/
[root@fk-zqjcweb01 log_error_script]# ls
decision sendemail.sh
[root@fk-zqjcweb01 log_error_script]# ls decision/
decision.log_diff_error.log decision.log_error.log decision.log_monit.sh
腳本內容:
[root@fk-zqjcweb01 log_error_script]# cat decision/decision.log_monit.sh
#!/bin/sh
# Monitor the last 1000 lines of decision.log for "error" entries.
# ERROR_NEW_LOG holds the error lines seen at the last alert; when the current
# error lines differ from that snapshot, new errors appeared -> send mail.
ERROR_LOG=/data/log/decision/decision.log
ERROR_NEW_LOG=/opt/log_error_script/decision/decision.log_error.log
ERROR_DIFF_LOG=/opt/log_error_script/decision/decision.log_diff_error.log
DATE=`date +%Y年%m月%d日%H時%M分%S秒`
HOST=`/bin/hostname`
# First IPv4 address from a line carrying "Bcast" (legacy ifconfig output format).
IP=`/sbin/ifconfig|grep "inet addr"|grep "Bcast"|cut -d":" -f2|awk -F" " '{print $1}'`
if [ ! -f "$ERROR_NEW_LOG" ];then
/bin/touch "$ERROR_NEW_LOG"
fi
# NUM1: error lines in the most recent 1000 lines; NUM2: lines in the snapshot.
NUM1=$(/usr/bin/tail -1000 "$ERROR_LOG"|grep -c error)
NUM2=$(/bin/cat "$ERROR_NEW_LOG"|wc -l)
# First alert: errors present but the snapshot is still empty.
if [ -f "$ERROR_LOG" ] && [ "$NUM1" -ne 0 ] && [ "$NUM2" -eq 0 ];then
/bin/bash /opt/log_error_script/sendemail.sh wangshibo@kevin.com "風控系統${HOSTNAME}機器的decision.log日志中的error監控" "告警主機:${HOSTNAME} \n告警IP:${IP} \n告警時間:${DATE} \n告警等級:嚴重,抓緊解決啊! \n告警人員:王士博 \n告警詳情:decision.log日志中出現error了! \n告警日志文件:${ERROR_LOG} \n當前狀態: PROBLEM "
/usr/bin/tail -1000 "$ERROR_LOG"|grep error > "$ERROR_NEW_LOG"
fi
# Subsequent alerts: current error lines differ from the snapshot.
/usr/bin/tail -1000 "$ERROR_LOG"|grep error > "$ERROR_DIFF_LOG"
if ! /usr/bin/cmp "$ERROR_DIFF_LOG" "$ERROR_NEW_LOG" >/dev/null 2>&1;then
/bin/bash /opt/log_error_script/sendemail.sh wangshibo@kevin.com "風控系統${HOSTNAME}機器的decision.log日志中的error監控" "告警主機:${HOSTNAME} \n告警IP:${IP} \n告警時間:${DATE} \n告警等級:嚴重,抓緊解決啊! \n告警人員:王士博 \n告警詳情:decision.log日志中出現error了! \n告警日志文件:${ERROR_LOG} \n當前狀態: PROBLEM "
/usr/bin/tail -1000 "$ERROR_LOG"|grep error > "$ERROR_NEW_LOG"
fi
You have new mail in /var/spool/mail/root
結合crontab進行定時監控
[root@fk-zqjcweb01 log_error_script]# crontab -l
#decision.log日志的error監控
* * * * * /bin/bash -x /opt/log_error_script/decision/decision.log_monit.sh >/dev/null 2>&1
* * * * * sleep 15;/bin/bash -x /opt/log_error_script/decision/decision.log_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/decision/decision.log_monit.sh >/dev/null 2>&1
* * * * * sleep 45;/bin/bash -x /opt/log_error_script/decision/decision.log_monit.sh >/dev/null 2>&1
上面提到的sendemail.sh郵件發送腳本
[root@fk-zqjcweb01 ~]# cat /opt/log_error_script/sendemail.sh
#!/bin/bash
# Filename: SendEmail.sh
# Notes: send an alert mail via the sendEmail utility.
# Usage: sendemail.sh <to_address> <subject_utf8> <body_utf8>
#
# Log file for this script: everything printed below (incl. the set -x trace)
# is redirected into it, so callers see no output on their own terminal.
LOGFILE="/tmp/Email.log"
:>"$LOGFILE"
exec 1>"$LOGFILE"
exec 2>&1
SMTP_server='smtp.kevin.com'
username='monit@kevin.com'
password='monit@123'
from_email_address='monit@kevin.com'
to_email_address="$1"
message_subject_utf8="$2"
message_body_utf8="$3"
# Convert the subject to GB2312 so Chinese subjects are not shown garbled by
# the receiving mail client; on iconv failure fall back to the UTF-8 original.
# (The $? test directly follows the assignment, which preserves iconv's status.)
message_subject_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_subject_utf8
EOF`
[ $? -eq 0 ] && message_subject="$message_subject_gb2312" || message_subject="$message_subject_utf8"
# Convert the body to GB2312 for the same reason, with the same fallback.
message_body_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_body_utf8
EOF`
[ $? -eq 0 ] && message_body="$message_body_gb2312" || message_body="$message_body_utf8"
# Send the mail; set -x traces the final command line into $LOGFILE.
sendEmail='/usr/local/bin/sendEmail'
set -x
$sendEmail -s "$SMTP_server" -xu "$username" -xp "$password" -f "$from_email_address" -t "$to_email_address" -u "$message_subject" -m "$message_body" -o message-content-type=text -o message-charset=gb2312
3)第三類日志
日志規則說明:
- 在etl服務器下的EXP、MDB、MID、ODB、PDB、PUS、SDB系統里有很多任務日志,日志都存放在當天的日期目錄下。
- 現在需要對這些任務日志的error進行監控,當出現error報錯信息時立刻發出報警!
- 當這些任務日志文件里有出現error報錯信息的,那么該任務日志文件就不會被寫入了。也就是說一個任務日志文件只會出現一次error報錯。
- 出現error報錯信息的任務日志不能刪除和更改,因為這些任務日志會被其他程序調用展示。
[root@bigdata-etl01 ~]# ll /data/etluser/LOG/
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 EXP
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 MDB
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 MID
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 ODB
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 PDB
drwxrwx--- 32 etluser etluser 4096 Jul 6 00:47 PUS
drwxrwx--- 33 etluser etluser 4096 Jul 6 02:00 SDB
[root@bigdata-etl01 ~]# ls /data/etluser/LOG/EXP/
20180606 20180609 20180612 20180615 20180618 20180621 20180624 20180627 20180630 20180703 20180706
20180607 20180610 20180613 20180616 20180619 20180622 20180625 20180628 20180701 20180704
20180608 20180611 20180614 20180617 20180620 20180623 20180626 20180629 20180702 20180705
[root@bigdata-etl01 ~]# ls /data/etluser/LOG/EXP/20180706/
EXP_EXP_V_CUST_CRDT_SITU_20180705[1][1].54.log exp_v_opr_size_curr_stats_0010601[1].pl.56.log
EXP_EXP_V_DAILY_BIZ_AMT_SITU_20180705[1][1].45.log exp_v_opr_size_curr_stats_0010602[1].pl.56.log
EXP_EXP_V_MATR_RMND_INTFC_QG6_001_20180705[1][1].83.log exp_v_prvs_provs_int_intfc_f0_0010600[1].pl.103.log
EXP_EXP_V_OPR_SIZE_CURR_STATS_001_20180705[1][1].56.log exp_v_prvs_provs_int_intfc_f0_0010601[1].pl.103.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_001_20180705[1][1].103.log exp_v_prvs_provs_int_intfc_f0_0020600[1].pl.98.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_002_20180705[1][1].98.log exp_v_prvs_provs_int_intfc_f0_0020601[1].pl.98.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_003_20180705[1][1].90.log exp_v_prvs_provs_int_intfc_f0_0030600[1].pl.90.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_007_20180705[1][1].48.log exp_v_prvs_provs_int_intfc_f0_0030601[1].pl.90.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_008_20180705[1][1].78.log exp_v_prvs_provs_int_intfc_f0_0070600[1].pl.48.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_009_20180705[1][1].15.log exp_v_prvs_provs_int_intfc_f0_0070601[1].pl.48.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F0_010_20180705[1][1].48.log exp_v_prvs_provs_int_intfc_f0_0080600[1].pl.78.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F1_004_20180705[1][1].16.log exp_v_prvs_provs_int_intfc_f0_0080601[1].pl.78.log
EXP_EXP_V_PRVS_PROVS_INT_INTFC_F1_006_20180705[1][1].8.log exp_v_prvs_provs_int_intfc_f0_0090600[1].pl.15.log
EXP_EXP_V_QRY_FACT_AGT_INTFC_20180705[1][1].47.log exp_v_prvs_provs_int_intfc_f0_0090601[1].pl.15.log
exp_v_cust_crdt_situ0600[1].pl.54.log exp_v_prvs_provs_int_intfc_f0_0100600[1].pl.48.log
exp_v_cust_crdt_situ0601[1].pl.54.log exp_v_prvs_provs_int_intfc_f0_0100601[1].pl.48.log
exp_v_cust_crdt_situ0602[1].pl.54.log exp_v_prvs_provs_int_intfc_f1_0040600[1].pl.16.log
exp_v_daily_biz_amt_situ0600[1].pl.45.log exp_v_prvs_provs_int_intfc_f1_0040601[1].pl.16.log
exp_v_daily_biz_amt_situ0601[1].pl.45.log exp_v_prvs_provs_int_intfc_f1_0060600[1].pl.8.log
exp_v_daily_biz_amt_situ0602[1].pl.45.log exp_v_prvs_provs_int_intfc_f1_0060601[1].pl.8.log
exp_v_matr_rmnd_intfc_qg6_0010600[1].pl.83.log exp_v_qry_fact_agt_intfc0600[1].pl.47.log
exp_v_matr_rmnd_intfc_qg6_0010601[1].pl.83.log exp_v_qry_fact_agt_intfc0601[1].pl.47.log
exp_v_matr_rmnd_intfc_qg6_0010602[1].pl.83.log exp_v_qry_fact_agt_intfc0602[1].pl.47.log
exp_v_opr_size_curr_stats_0010600[1].pl.56.log
監控腳本思路:
1)對這些任務日志進行批量搜索error關鍵字(不區分大小寫)
2)將出現error關鍵字的任務日志拷貝到一個專門的目錄下(error日志文件的列表目錄)。
3)對搜索到error關鍵字的任務日志做判斷,判斷它是否存在於那個列表目錄下:
如果不存在,說明是新出現error的日志文件,就立刻報警!
如果存在,說明出現的error是之前的信息,不報警!
監控腳本編寫情況如下:
error_log為error日志文件的列表目錄;
sendemail.sh為郵件發送腳本(上面介紹過)
[root@bigdata-etl01 log_error_script]# ls
EXP MDB MID ODB PDB PUS SDB sendemail.sh
[root@bigdata-etl01 log_error_script]# ls EXP/
error_log EXP_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls MDB/
error_log MDB_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls MID/
error_log MID_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls ODB/
error_log ODB_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls PDB/
error_log PDB_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls PUS/
error_log PUS_error_monit.sh
[root@bigdata-etl01 log_error_script]# ls SDB/
error_log SDB_error_monit.sh
[root@bigdata-etl01 log_error_script]#
這里貼出SDB系統的任務日志的error監控報警腳本(其他幾個系統的監控腳本與這個一樣,只需要將腳本中的SDB替換成對應的系統名稱即可!)
[root@bigdata-etl01 log_error_script]# cat /opt/log_error_script/SDB/SDB_error_monit.sh
#!/bin/sh
# Alert on "error" (case-insensitive) in today's SDB task logs.
# A log that has already triggered an alert is copied into the error_log list
# directory; its presence there suppresses repeat alerts for the same file.
DATE_DIR=$(date +%Y%m%d)
DATE=`date +%Y年%m月%d日%H時%M分%S秒`
HOST=`/bin/hostname`
IP=`/sbin/ifconfig|grep "inet addr"|grep "Bcast"|cut -d":" -f2|awk -F" " '{print $1}'`
cd /data/etluser/LOG/SDB
if [ ! -d "$DATE_DIR" ];then
/bin/mkdir "$DATE_DIR"
/bin/chown -R etluser.etluser "$DATE_DIR"
fi
cd /data/etluser/LOG/SDB/$DATE_DIR
# Iterate with a glob instead of `ls`, and always quote "$FILE": the task log
# names contain [1][1], which an unquoted expansion would re-interpret as a
# character-class pattern and match the wrong (or no) file.
for FILE in *.log
do
NUM=$(/bin/grep -i -c "error" "/data/etluser/LOG/SDB/$DATE_DIR/$FILE")
ERROR_MESSAGE=$(/bin/grep -i "error" "/data/etluser/LOG/SDB/$DATE_DIR/$FILE")
if [ "$NUM" -ne 0 ];then
# test -f instead of `ls` as an existence check for the "seen" marker copy
if [ ! -f "/opt/log_error_script/SDB/error_log/$FILE" ];then
/opt/log_error_script/sendemail.sh wangshibo@test.com "大數據平台etl服務器${HOSTNAME}的SDB任務日志里出現error了" "告警主機:${HOSTNAME} \n告警IP:${IP} \n告警時間:${DATE} \n告警等級:嚴重 \n告警人員:王士博 \n告警詳情:SDB的任務日志里出現error了,抓緊解決啊! \n當前狀態: PROBLEM \n告警日志文件:/data/etluser/LOG/SDB/$DATE_DIR/$FILE \n\n\n------請看下面error報錯信息------- \nerror信息:\n$ERROR_MESSAGE"
cp "/data/etluser/LOG/SDB/$DATE_DIR/$FILE" /opt/log_error_script/SDB/error_log/
else
echo "$FILE日志中error報錯信息是之前發生的,無需報警!"
fi
else
echo "$FILE 日志里沒有error報錯啦"
fi
done
給腳本賦予執行權限
[root@bigdata-etl01 log_error_script]# chmod 755 /opt/log_error_script/SDB/SDB_error_monit.sh
[root@bigdata-etl01 log_error_script]# sh /opt/log_error_script/SDB/SDB_error_monit.sh
qbl_biz_cst_bsc_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_cst_bsc_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_fnc_bsc_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_fnc_bsc_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_fnc_mod_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_fnc_mod_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_pd_bsc_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_pd_bsc_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_pre_ctr_bsc_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_pre_ctr_bsc_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_repy_base_inf0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_repy_base_inf0101[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_repy_pl_dtl0100[1].pl.78.log 日志里沒有error報錯啦
qbl_biz_repy_pl_dtl0101[1].pl.78.log 日志里沒有error報錯啦
qbl_biz_repy_pl_inf0100[1].pl.78.log 日志里沒有error報錯啦
qbl_biz_repy_pl_inf0101[1].pl.78.log 日志里沒有error報錯啦
qbl_biz_repy_rcrd_jrnl0100[1].pl.73.log 日志里沒有error報錯啦
qbl_biz_repy_rcrd_jrnl0101[1].pl.73.log 日志里沒有error報錯啦
.......
.......
結合crontab指定腳本定時執行任務(每30秒執行一次)
[root@bigdata-etl01 ~]# crontab -l
#etl相關任務日志的error監控報警
* * * * * /bin/bash -x /opt/log_error_script/EXP/EXP_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/EXP/EXP_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/MDB/MDB_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/MDB/MDB_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/MID/MID_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/MID/MID_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/ODB/ODB_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/ODB/ODB_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/PDB/PDB_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/PDB/PDB_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/PUS/PUS_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/PUS/PUS_error_monit.sh >/dev/null 2>&1
* * * * * /bin/bash -x /opt/log_error_script/SDB/SDB_error_monit.sh >/dev/null 2>&1
* * * * * sleep 30;/bin/bash -x /opt/log_error_script/SDB/SDB_error_monit.sh >/dev/null 2>&1
郵件報警效果如下:
如上可以看出,SDB系統的任務日志里發現了error信息,現在去日志列表目錄里查看下,發現出現error信息的任務日志已經拷貝到列表目錄里了。
當下次腳本執行,搜索到這些日志發現error時就會去做判斷,判斷這些日志是否存在列表目錄里,如果出現,就不會再次發出報警。
[root@bigdata-etl01 ~]# ll /opt/log_error_script/SDB/error_log/
total 12
-rw-r--r-- 1 root root 1978 Jul 6 10:36 SDB_QCX_CUSTOMER_INFO_20180705[1][1].73.log
-rw-r--r-- 1 root root 1939 Jul 6 10:19 SDB_QCX_FTTYPE_STC_20180705[1][1].51.log
-rw-r--r-- 1 root root 1939 Jul 6 10:19 SDB_QCX_SETTLE_STC_20180705[1][1].17.log
十三、網站訪問狀態和超時時間監控報警設置
對網站訪問狀態和超時時間進行監控:當code狀態為5xx或者訪問超時時間大於10s時進行報警。腳本腳本如下:
[root@qd-inf-logcollector01 web_monit]$ pwd
/app/script/web_monit
[root@qd-inf-logcollector01 web_monit]$ ll
total 12
-rwxr-xr-x 1 root root 870 Oct 12 21:34 http_monit.sh //監控腳本
-rwxr-xr-x 1 root root 857 Oct 12 21:25 sms.py //短信報警腳本,里面有報警聯系人
-rw-r--r-- 1 root root 377 Oct 12 21:27 weblist.txt //監控的網站域名列表
[root@qd-inf-logcollector01 web_monit]$ cat http_monit.sh
#!/bin/sh
# Check every URL in weblist.txt: send an SMS alert when the HTTP status is
# any 5xx code, or when the total response time exceeds 10 seconds (10000 ms).
weblist=/app/script/web_monit/weblist.txt
# skip comment lines and blank lines in the URL list
for list in `cat $weblist|grep -E -v "#|^$"`
do
httpcode=`curl -o /dev/null -s -w %{http_code} "$list"`
# total time in whole milliseconds; %.0f rounds so fractional values
# (e.g. 834.5) cannot break the integer -ge comparison below
httptime=`curl -o /dev/null -s -w "time_connect: %{time_connect}\ntime_starttransfer: %{time_starttransfer}\ntime_total: %{time_total}\n" "$list"|grep time_total|awk -F ":" '{printf "%.0f", $2*1000}'`
# match the whole 5xx class, not just the four codes the old version listed
case "$httpcode" in
5??)
python /app/script/web_monit/sms.py "$list" "$list 訪問有誤!狀態碼為$httpcode!請收到報警后盡快查看並處理!"
;;
*)
echo "$list is checked ok!"
;;
esac
if [ "$httptime" -ge 10000 ]
then
python /app/script/web_monit/sms.py "$list" " $list訪問超時!超時時間為$httptime毫秒!請收到報警后盡快查看並處理!"
else
echo "$list is connect ok!"
fi
done
手動檢查網站訪問的code狀態碼
[root@qd-inf-logcollector01 web_monit]$ curl -o /dev/null -s -w %{http_code} http://www.wang.com
200
手動檢查網站訪問的超時時間(單位:毫秒,如下網址訪問的時間為0.8秒)
[root@qd-inf-logcollector01 web_monit]$ curl -o /dev/null -s -w "time_connect: %{time_connect}\ntime_starttransfer: %{time_starttransfer}\ntime_total: %{time_total}\n" http://www.wang.com |grep time_total|awk -F ":" '{print $2*1000}'
800
網站列表和腳本執行
[root@qd-inf-logcollector01 web_monit]$cat weblist.txt
http://nop.kevin.cn
http://ap.kevin.cn
http://ope.kevin.cn
http://opr.kevin.cn
http://www.kevin.cn
http://kevin.cn
http://tb.kevin.cn
http://www.wang.com
https://www.wang.com
http://doc.kevin.cn
http://docs.kevin.cn
http://git.wang.com
http://monitor.kevin.cn
http://dash.kevin.cn
[root@qd-inf-logcollector01 web_monit]$sh http_monit.sh
http://nop.kevin.cn is checked ok!
http://nop.kevin.cn is connect ok!
http://ap.kevin.cn is checked ok!
http://ap.kevin.cn is connect ok!
http://ope.kevin.cn is checked ok!
http://ope.kevin.cn is connect ok!
http://opr.kevin.cn is checked ok!
http://opr.kevin.cn is connect ok!
http://www.kevin.cn is checked ok!
http://www.kevin.cn is connect ok!
http://kevin.cn is checked ok!
http://kevin.cn is connect ok!
http://tb.kevin.cn is checked ok!
http://tb.kevin.cn is connect ok!
http://www.wang.com is checked ok!
http://www.wang.com is connect ok!
https://www.wang.com is checked ok!
https://www.wang.com is connect ok!
http://doc.kevin.cn is checked ok!
http://doc.kevin.cn is connect ok!
http://docs.kevin.cn is checked ok!
http://docs.kevin.cn is connect ok!
http://git.wang.com is checked ok!
http://git.wang.com is connect ok!
http://monitor.kevin.cn is checked ok!
http://monitor.kevin.cn is connect ok!
http://dash.kevin.cn is checked ok!
http://dash.kevin.cn is connect ok!
定時監控任務(每兩分鍾監控一次)
[root@qd-inf-logcollector01 web_monit]$ crontab -l
*/2 * * * * /bin/bash -x /app/script/web_monit/http_monit.sh > /dev/null 2>&1
簡單注意下:在 test / [ ] 條件判斷中,==和!=只用於字符串比較,不應用於整數大小比較;整數比較應使用 -eq、-ne、-gt、-lt 這類運算符(若在 (( )) 算術上下文中,則可以直接使用 ==、> 等算術比較符)
十四、服務器磁盤監控腳本分享(含報警郵件)
在日常的運維工作中,我們經常會對服務器的磁盤使用情況進行巡檢,以防止磁盤爆滿導致的業務故障。如果能編寫一個合理完善的監控腳本,當磁盤使用率達到我們設置的閥值時,就自動發送報警郵件,以便我們及時獲悉到快爆滿的磁盤情況!
下面分享一個腳本:
監控本機的根磁盤和home盤,當根磁盤使用率達到90%和home磁盤使用率達到95%的時候,發報警郵件至wangshibo@kevin.cn和liugang@kevin.cn
[root@kevin ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 50G 46G 12G 90% /
tmpfs 32G 68K 32G 1% /dev/shm
/dev/sda1 485M 40M 421M 9% /boot
/dev/mapper/VolGroup-lv_home 836G 795G 673G 95% /home
取根磁盤當前利用率的百分值
[root@kevin ~]# /bin/df -h|grep /dev/mapper/VolGroup-lv_root|awk -F" " '{print $5}'|cut -d"%" -f1
90
取home盤當前利用率的百分值
[root@kevin ~]# /bin/df -h|grep /dev/mapper/VolGroup-lv_home|awk -F" " '{print $5}'|cut -d"%" -f1
95
編寫郵件報警腳本
[root@kevin ~]# vim /root/root_disk.sh
#!/bin/bash
# Disk usage alarm: mail when / reaches 90% used or /home reaches 95% used.
# Assumes the LVM device names below; adjust grep patterns per host.
SERVER_IP=`ifconfig|grep 192.168.1|awk -F":" '{print $2}'|cut -d" " -f1`
# current use% of each volume, numeric part only (e.g. "90")
ROOT_DISK=`/bin/df -h|grep /dev/mapper/VolGroup-lv_root|awk -F" " '{print $5}'|cut -d"%" -f1`
HOME_DISK=`/bin/df -h|grep /dev/mapper/VolGroup-lv_home|awk -F" " '{print $5}'|cut -d"%" -f1`
if [ "$ROOT_DISK" -ge 90 ];then
/usr/local/bin/sendEmail -f ops@kevin.cn -t wangshibo@kevin.cn -s smtp.kevin.cn -u " The ROOT_DISK of $SERVER_IP-$HOSTNAME is warning!" -o message-content-type=html -o message-charset=utf8 -xu ops@kevin.cn -xp zh@123bj -m "The ROOT_DISK of $SERVER_IP-$HOSTNAME,now use% is 90%,please deal with it as soon as possible"
/usr/local/bin/sendEmail -f ops@kevin.cn -t liugang@kevin.cn -s smtp.kevin.cn -u " The ROOT_DISK of $SERVER_IP-$HOSTNAME is warning!" -o message-content-type=html -o message-charset=utf8 -xu ops@kevin.cn -xp zh@123bj -m "The ROOT_DISK of $SERVER_IP-$HOSTNAME,now use% is 90%,please deal with it as soon as possible"
else
echo "The ROOT_DISK of $SERVER_IP-$HOSTNAME is Enough to use"
fi
sleep 5
if [ "$HOME_DISK" -ge 95 ];then
/usr/local/bin/sendEmail -f ops@kevin.cn -t wangshibo@kevin.cn -s smtp.kevin.cn -u " The HOME_DISK of $SERVER_IP-$HOSTNAME is warning!" -o message-content-type=html -o message-charset=utf8 -xu ops@kevin.cn -xp zh@123bj -m "The HOME_DISK of $SERVER_IP-$HOSTNAME,now use% is 95%,please deal with it as soon as possible"
/usr/local/bin/sendEmail -f ops@kevin.cn -t liugang@kevin.cn -s smtp.kevin.cn -u " The HOME_DISK of $SERVER_IP-$HOSTNAME is warning!" -o message-content-type=html -o message-charset=utf8 -xu ops@kevin.cn -xp zh@123bj -m "The HOME_DISK of $SERVER_IP-$HOSTNAME,now use% is 95%,please deal with it as soon as possible"
else
# fixed copy-paste bug: this branch reports the HOME disk, not the ROOT disk
echo "The HOME_DISK of $SERVER_IP-$HOSTNAME is Enough to use"
fi
===============================================================
設置計划任務
[root@kevin ~]# crontab -e
*/30 * * * * /bin/bash -x /root/root_disk.sh > /dev/null 2>&1
上面腳本中的郵件報警用的是sendemail,需要提前安裝sendemail環境,安裝操作如下:
1)先下載安裝包到本地,解壓。
[root@kevin ~]# cd /usr/local/src/
[root@kevin src]# wget -c http://caspian.dotconf.net/menu/Software/SendEmail/sendEmail-v1.56.tar.gz
[root@kevin src]# tar -zvxf sendEmail-v1.56.tar.gz
[root@kevin src]# cd sendEmail-v1.56
[root@kevin sendEmail-v1.56]# cp -a sendEmail /usr/local/bin/
[root@kevin sendEmail-v1.56]# chmod +x /usr/local/bin/sendEmail
[root@kevin sendEmail-v1.56]# file /usr/local/bin/sendEmail
/usr/local/bin/sendEmail: a /usr/bin/perl -w script text executable
2)安裝下依賴
[root@kevin sendEmail-v1.56]# yum install perl-Net-SSLeay perl-IO-Socket-SSL -y
[root@kevin sendEmail-v1.56]# /usr/local/bin/sendEmail -f from@kevin.cn -t to@kevin.cn -s smtp.kevin.cn -u "我是郵件主題" -o message-content-type=html -o message-charset=utf8 -xu from@kevin.cn -xp zh@123bj -m "我是郵件內容"
命令說明:
/usr/local/bin/sendEmail #命令主程序
-f from@kevin.cn #發件人郵箱
-t to@kevin.cn #收件人郵箱
-s smtp.huanqi.cn #發件人郵箱的smtp服務器
-u "我是郵件主題" #郵件的標題
-o message-content-type=html #郵件內容的格式,html表示它是html格式
-o message-charset=utf8 #郵件內容編碼
-xu from@kevin.cn #發件人郵箱的用戶名
-xp zh@123bj #發件人郵箱密碼
-m "我是郵件內容" #郵件的具體內容
例如:
[root@kevin alertscripts]# /usr/local/bin/sendEmail -f ops@kevin.cn -t wangshibo@kevin.cn -s smtp.kevin.cn -u "我是郵件主題" -o message-content-type=html -o message-charset=utf8 -xu ops@kevin.cn -xp zh@123bj -m "我是郵件內容"
Oct 14 19:38:29 kevin sendEmail[65454]: Email was sent successfully!
登陸wangshibo@kevin.cn郵箱,發現已經收到了上面發送的郵件:
十五、業務日志清理腳本
線上某些系統業務跑一段時間后,日志就會越來越多,考慮到業務機器磁盤有限,需要添加業務日志清理功能。根據日志所在分區磁盤使用情況來判斷是否清理日志,比如當日志分區磁盤空間使用超過90%時,將一周前的日志打包轉移到別處 (別的分區下或遠程存儲設備上)。腳本 (/opt/script/log_clear.sh) 如下:
#!/bin/bash
# Business-log cleanup: when the log partition ("/") exceeds 90% usage,
# archive logs older than one week into LOG_BACK_DIR and delete the originals.

# Current use% of the root partition; grep -w "/" matches only the "/" mount
# point, and the trailing % is stripped to leave a bare integer.
LOG_PARTITION=$(df -h|awk '{print $5,$6}'|grep -w "/"|cut -d" " -f1|awk -F"%" '{print $1}')
# Cut-off date: logs dated strictly before this are archived (keep one week).
KEEP_DATE=$(date -d '-7 days' +%Y%m%d)
# Directory the live logs are written to.
LOG_DIR=/opt/log/kevin
# Archive destination (local /home partition here; could be remote storage).
LOG_BACK_DIR=/home/log/kevin
# Make sure the archive destination exists.
if [ ! -d "${LOG_BACK_DIR}" ];then
mkdir -p "${LOG_BACK_DIR}"
fi
if [ "${LOG_PARTITION}" -ge 90 ];then
cd "${LOG_DIR}" || exit 1
# Glob instead of parsing `ls -l`: the old version also swallowed the
# "total" header line, producing an empty date and a test error each run.
for LOG_FILE in stepweb_*.log
do
# stepweb_20190915.log -> 20190915 via parameter expansion (no cut/awk forks)
LOG_DATE=${LOG_FILE#stepweb_}
LOG_DATE=${LOG_DATE%.log}
if [ "${LOG_DATE}" -lt "${KEEP_DATE}" ];then
# tar -P keeps absolute paths without the "Removing leading /" warning,
# then the archive is moved aside and the raw log removed.
tar -zvPcf "${LOG_DIR}/stepweb_${LOG_DATE}.log.tar.gz" "${LOG_DIR}/stepweb_${LOG_DATE}.log"
mv "${LOG_DIR}/stepweb_${LOG_DATE}.log.tar.gz" "${LOG_BACK_DIR}/"
rm -f "${LOG_DIR}/stepweb_${LOG_DATE}.log"
fi
done
fi
根據上面腳本驗證下:
查看本機磁盤使用情況,日志所在分區磁盤當前使用率達到92%了
[root@yyweb kevin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 50G 46G 4G 92% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 34M 3.8G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/sda1 1014M 183M 832M 19% /boot
/dev/mapper/centos-home 142G 33M 142G 1% /home
日志所在路徑
[root@yyweb kevin]# pwd
/opt/log/kevin
日志情況
[root@yyweb kevin]# ls
stepweb_20190810.log stepweb_20190817.log stepweb_20190824.log stepweb_20190901.log stepweb_20190908.log stepweb_20190915.log
stepweb_20190811.log stepweb_20190818.log stepweb_20190825.log stepweb_20190902.log stepweb_20190909.log stepweb_20190916.log
stepweb_20190812.log stepweb_20190819.log stepweb_20190826.log stepweb_20190903.log stepweb_20190910.log stepweb_20190917.log
stepweb_20190813.log stepweb_20190820.log stepweb_20190827.log stepweb_20190904.log stepweb_20190911.log stepweb_20190918.log
stepweb_20190814.log stepweb_20190821.log stepweb_20190828.log stepweb_20190905.log stepweb_20190912.log stepweb_20190919.log
stepweb_20190815.log stepweb_20190822.log stepweb_20190829.log stepweb_20190906.log stepweb_20190913.log
stepweb_20190816.log stepweb_20190823.log stepweb_20190830.log stepweb_20190907.log stepweb_20190914.log
[root@yyweb kevin]# ls /home/
[root@yyweb kevin]#
執行日志清理腳本
[root@yyweb kevin]# sh /opt/script/log_clear.sh
/opt/log/kevin/stepweb_20190810.log
/opt/log/kevin/stepweb_20190811.log
/opt/log/kevin/stepweb_20190812.log
/opt/log/kevin/stepweb_20190813.log
/opt/log/kevin/stepweb_20190814.log
/opt/log/kevin/stepweb_20190815.log
/opt/log/kevin/stepweb_20190816.log
/opt/log/kevin/stepweb_20190817.log
/opt/log/kevin/stepweb_20190818.log
/opt/log/kevin/stepweb_20190819.log
/opt/log/kevin/stepweb_20190820.log
/opt/log/kevin/stepweb_20190821.log
/opt/log/kevin/stepweb_20190822.log
/opt/log/kevin/stepweb_20190823.log
/opt/log/kevin/stepweb_20190824.log
/opt/log/kevin/stepweb_20190825.log
/opt/log/kevin/stepweb_20190826.log
/opt/log/kevin/stepweb_20190827.log
/opt/log/kevin/stepweb_20190828.log
/opt/log/kevin/stepweb_20190829.log
/opt/log/kevin/stepweb_20190830.log
/opt/log/kevin/stepweb_20190901.log
/opt/log/kevin/stepweb_20190902.log
/opt/log/kevin/stepweb_20190903.log
/opt/log/kevin/stepweb_20190904.log
/opt/log/kevin/stepweb_20190905.log
/opt/log/kevin/stepweb_20190906.log
/opt/log/kevin/stepweb_20190907.log
/opt/log/kevin/stepweb_20190908.log
/opt/log/kevin/stepweb_20190909.log
/opt/log/kevin/stepweb_20190910.log
/opt/log/kevin/stepweb_20190911.log
/opt/log/kevin/stepweb_20190912.log
日志清理后,日志路徑下只保留了最近一周的日志
[root@yyweb kevin]# ls
stepweb_20190913.log stepweb_20190915.log stepweb_20190917.log stepweb_20190919.log
stepweb_20190914.log stepweb_20190916.log stepweb_20190918.log
一周之前的日志被打包轉移到/home/log/kevin下了
[root@yyweb kevin]# ls /home/log/kevin/
stepweb_20190810.log.tar.gz stepweb_20190817.log.tar.gz stepweb_20190824.log.tar.gz stepweb_20190901.log.tar.gz stepweb_20190908.log.tar.gz
stepweb_20190811.log.tar.gz stepweb_20190818.log.tar.gz stepweb_20190825.log.tar.gz stepweb_20190902.log.tar.gz stepweb_20190909.log.tar.gz
stepweb_20190812.log.tar.gz stepweb_20190819.log.tar.gz stepweb_20190826.log.tar.gz stepweb_20190903.log.tar.gz stepweb_20190910.log.tar.gz
stepweb_20190813.log.tar.gz stepweb_20190820.log.tar.gz stepweb_20190827.log.tar.gz stepweb_20190904.log.tar.gz stepweb_20190911.log.tar.gz
stepweb_20190814.log.tar.gz stepweb_20190821.log.tar.gz stepweb_20190828.log.tar.gz stepweb_20190905.log.tar.gz stepweb_20190912.log.tar.gz
stepweb_20190815.log.tar.gz stepweb_20190822.log.tar.gz stepweb_20190829.log.tar.gz stepweb_20190906.log.tar.gz
stepweb_20190816.log.tar.gz stepweb_20190823.log.tar.gz stepweb_20190830.log.tar.gz stepweb_20190907.log.tar.gz
再貼一個簡單的日志處理腳本
#!/bin/sh
# Compress rotated logs older than a retention period.
# usage: sh clearlog.sh sysname appname keepdays
sysName=$1
appName=$2
keepDay=$3
logDir=/var/log/${sysName}/${appName}
logFile=${appName}.log
# Abort if the log directory is missing: otherwise find would run (and gzip
# files) in whatever directory the script happened to be started from.
cd "${logDir}" || exit 1
# Rotated logs look like <app>.log.<NN>; compress those older than keepDay days.
find ./ -name "${logFile}.*[0-9][0-9]" -mtime +"${keepDay}" -exec gzip {} \;
十六、Linux下間隔多少秒 (即以秒為單位) 去執行某條命令或某個shell腳本的操作方法
在日常運維工作中, 經常會碰到以秒為單位去定時執行某些命令或監控腳本的需求。 說到定時任務就要用到crontab,通常來說,crontab的最小單位是分鍾級別,要想實現秒級別的定時任務,就要進行特殊設置了。
[root@test ~]# cat /root/kevin.sh
#!/bin/bash
# Append a heartbeat line to the test log on every run (driven by cron).
printf '%s\n' "beijing is so good!" >> /root/test.log
添加腳本執行權限,並配置到crontab計划任務里(使用&& 或者 ;都是一樣的效果)。思路:先過一分鍾執行第一次,接着就是每隔2秒鍾執行一次。
[root@test ~]# chmod 755 /root/kevin.sh
[root@test ~]# crontab -e
* * * * * /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 2 && /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 4; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 6; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 8; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 10; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 12; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 14; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 16; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 18; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 20; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 22; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 24; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 26; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 28; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 30; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 32; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 34; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 36; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 38; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 40; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 42; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 44; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 46; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 48; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 50; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 52; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 54; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 56; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
* * * * * sleep 58; /bin/bash -x /root/kevin.sh >/dev/null 2>&1
[root@test ~]# tail -f /root/test.log
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
beijing is so good!
..........
..........
十七、Linux下批量ping某個網段ip的腳本
比如現在需要對172.16.50.0/24網段的ip進行檢查,檢查哪些ip現在被占用,哪些ip沒有被占用,可以通過ping命令來檢查,腳本如下:
[root@uatdns01 opt]# vim /opt/ping.sh
#!/bin/bash
# Scan 172.16.50.1-254 and print each address as reachable ([ OK ]) or not
# ([FAILED]), using the action() helper from the init functions library.
. /etc/init.d/functions
for var in {1..254};
do
ip=172.16.50.$var
# two probes; branch on ping's exit status directly instead of testing $?
if ping -c2 "$ip" >/dev/null 2>&1;then
action "$ip" /bin/true
else
action "$ip" /bin/false
fi
done
[root@uatdns01 opt]# chmod 755 /opt/ping.sh
[root@uatdns01 opt]# sh /opt/ping.sh
172.16.50.1 [ OK ]
172.16.50.2 [FAILED]
172.16.50.3 [FAILED]
172.16.50.4 [FAILED]
172.16.50.5 [FAILED]
.........
.........
172.16.50.99 [ OK ]
172.16.50.100 [ OK ]
172.16.50.101 [ OK ]
172.16.50.102 [ OK ]
172.16.50.103 [ OK ]
172.16.50.104 [ OK ]
172.16.50.105 [ OK ]
172.16.50.106 [ OK ]
172.16.50.107 [ OK ]
172.16.50.108 [ OK ]
172.16.50.109 [ OK ]
172.16.50.110 [ OK ]
172.16.50.111 [FAILED]
172.16.50.112 [ OK ]
獲取172.16.60.0/24網段可用ip的shell腳本
[root@ansible-server ~]# cat ip_ping.sh
#!/bin/bash
# Report reachability (yes/no) for every host in 172.16.60.0/24.
ip=1
# -le 254 so the last host (.254) is probed too; the old `!= 254` test
# stopped the loop one address early. Counter uses $(( )) instead of expr.
while [ "$ip" -le 254 ]; do
ping 172.16.60.$ip -c 2 | grep -q "ttl=" && echo "172.16.60.$ip yes" || echo "172.16.60.$ip no"
ip=$((ip + 1))
done
執行:
[root@ansible-server ~]# sh ip_ping.sh
172.16.60.1 yes
172.16.60.2 no
172.16.60.3 no
.............
172.16.60.24 yes
172.16.60.25 yes
172.16.60.26 no
.............
如果只打印可用ip, 則腳本修改如下:
[root@ansible-server ~]# cat ip_ping.sh
#!/bin/bash
# Print only the reachable hosts in 172.16.60.0/24; "no" lines are discarded
# (the redirection applies to the final echo only, which is the intent).
ip=1
# -le 254 probes the last host too (old `!= 254` stopped one early)
while [ "$ip" -le 254 ]; do
ping 172.16.60.$ip -c 2 | grep -q "ttl=" && echo "172.16.60.$ip yes" || echo "172.16.60.$ip no" >/dev/null 2>&1
ip=$((ip + 1))
done
執行:
[root@ansible-server ~]# sh ip_ping.sh
172.16.60.1 yes
172.16.60.21 yes
172.16.60.22 yes
172.16.60.23 yes
172.16.60.24 yes
172.16.60.25 yes
172.16.60.31 yes
172.16.60.32 yes
172.16.60.33 yes
172.16.60.34 yes
172.16.60.35 yes
172.16.60.36 yes
172.16.60.37 yes
172.16.60.38 yes
172.16.60.39 yes
獲取172.16.50.0/24, 172.16.51.0/24, 172.16.60.0/24 三個網段的可用ip
[root@ansible-server ~]# cat ip_ping.sh
#!/bin/bash
# Print the reachable hosts in 172.16.50.0/24, 172.16.51.0/24 and 172.16.60.0/24.
# Fixed: the counter is advanced ONCE per loop iteration, so every address in
# each of the three networks is probed. The original incremented after every
# ping, so each network was only probed at every third address.
ip=1
while [ "$ip" -le 254 ]; do
ping 172.16.50.$ip -c 2 | grep -q "ttl=" && echo "172.16.50.$ip yes" || echo "172.16.50.$ip no" >/dev/null 2>&1
ping 172.16.51.$ip -c 2 | grep -q "ttl=" && echo "172.16.51.$ip yes" || echo "172.16.51.$ip no" >/dev/null 2>&1
ping 172.16.60.$ip -c 2 | grep -q "ttl=" && echo "172.16.60.$ip yes" || echo "172.16.60.$ip no" >/dev/null 2>&1
ip=$((ip + 1))
done
執行:
[root@ansible-server ~]# sh ip_ping.sh
172.16.50.1 yes
172.16.51.11 yes
172.16.50.16 yes
172.16.50.19 yes
172.16.51.20 yes
172.16.60.21 yes
172.16.50.22 yes
172.16.60.24 yes
172.16.50.25 yes
172.16.50.31 yes
172.16.60.33 yes
172.16.51.35 yes
172.16.60.36 yes
172.16.60.39 yes
172.16.51.41 yes
172.16.51.44 yes
172.16.50.52 yes
172.16.51.53 yes
172.16.50.55 yes
172.16.50.58 yes
172.16.51.65 yes
..................
十八、查看系統運行情況
#!/bin/bash
#用於查看系統運行情況
#功能選擇菜單
menu(){
# Print the colored option menu and read the user's choice into the
# GLOBAL variable $num, which run() dispatches on.
echo -e "\033[31;32m 查看系統運行情況 \033[0m"
echo -e "\033[31;32m================================================================================\033[0m"
echo -e "\033[34m請選擇:\033[0m"
echo -e "\033[33m1、查看系統負載 2、查看CPU消耗% 3、查看內存消耗% 4、查看SWAP消耗% \033[0m"
echo -e "\033[33m5、查看磁盤消耗% 6、查看inode消耗% 7、查看磁盤IO 8、查看網絡流量 \033[0m"
echo -e "\033[33m9、一鍵查看所有情況 10、退出腳本 \033[0m"
echo -e "\033[31;32m================================================================================\033[0m"
echo
# -r keeps any backslash in the input literal (defensive `read` idiom).
read -r -p "請輸入數字:1-8[單獨查看],9[一鍵查看],10[退出腳本]: " num
}
#(1)查看系統負載
load_average(){
# Show the CPU core count and the 1/5/15-minute load averages.
# grep -c replaces the `grep | wc -l` pipeline; $( ) replaces backticks.
cpu_core=$(grep -c 'model name' /proc/cpuinfo)
echo -e "\033[36mCPU核數: $cpu_core\033[0m"
# NOTE(review): assumes the load averages are the last three comma-
# separated fields of `uptime`; the field positions shift on systems
# whose uptime prints e.g. "up 5 min" — confirm on target hosts.
load=$(uptime | awk -F ',' '{print $3 $4 $5}' | sed 's/^ *//')
echo -e "\033[36m$load\033[0m"
echo ""
}
#(2)查看CPU消耗%
cpu_use_percent(){
# CPU utilisation % = 100 - idle%, where idle is column 15 ("id") of a
# one-shot vmstat report; sed '1,2d' strips the two header lines.
cpu_idle=$(vmstat | awk '{print $15}' | sed '1,2d')
# $(( )) replaces the deprecated $[ ] arithmetic form.
cpu_use=$((100 - cpu_idle))
echo -e "\033[36mCPU使用率%: $cpu_use\033[0m"
echo ""
}
#(3)查看內存消耗%
mem_use_percent(){
# Memory utilisation % from `free -m` (Mem row: column 2 = total,
# column 3 = used).
mem_used=$(free -m | grep Mem | awk '{print $3}')
mem_tol=$(free -m | grep Mem | awk '{print $2}')
# Pass shell values into awk via -v instead of splicing them into the
# program text with quote tricks.
mem_use=$(awk -v used="$mem_used" -v total="$mem_tol" 'BEGIN{print used/total*100}')
echo -e "\033[36m內存已使用"$mem_used"M,總內存"$mem_tol"M,內存使用率%: $mem_use\033[0m"
echo ""
}
#(4)查看SWAP消耗%
swap_use_percent(){
# Swap utilisation % from `free -m` (Swap row: column 2 = total,
# column 3 = used).
swap_used=$(free -m | grep Swap | awk '{print $3}')
swap_tol=$(free -m | grep Swap | awk '{print $2}')
# BUG FIX: on hosts with no swap configured, total is 0 and the original
# awk expression aborted with "division by zero". Report 0% instead.
if [ "$swap_tol" -eq 0 ] 2>/dev/null
then
swap_use=0
else
swap_use=$(awk -v used="$swap_used" -v total="$swap_tol" 'BEGIN{print used/total*100}')
fi
echo -e "\033[36mSwap已使用"$swap_used"M,總Swap"$swap_tol"M,Swap使用率%: $swap_use\033[0m"
echo ""
}
#(5)查看磁盤消耗%
disk_use_percent(){
# Print the filesystem usage table (`df -h`) without its header row.
usage_table=$(df -h | tail -n +2)
echo -e "\033[36m磁盤使用情況: \n$usage_table\033[0m"
echo ""
}
#(6)查看inode消耗%
inode_use_percent(){
# Print the inode usage table (`df -i`) without its header row.
inode_table=$(df -i | tail -n +2)
echo -e "\033[36minode使用情況: \n$inode_table\033[0m"
echo ""
}
#(7)查看磁盤IO
disk_io(){
# One-shot vmstat block-I/O columns (sed '1,2d' strips the headers):
#   $9  = bi, blocks received FROM a block device (reads)
#   $10 = bo, blocks sent TO a block device (writes)
# BUG FIX: the original labels were swapped — bi was described as
# "sent to" and bo as "received from".
disk_io_bi=$(vmstat | awk '{print $9}' | sed '1,2d')
echo -e "\033[36m從塊設備讀入的塊數: "$disk_io_bi"塊每秒\033[0m"
disk_io_bo=$(vmstat | awk '{print $10}' | sed '1,2d')
echo -e "\033[36m寫入塊設備的塊數: "$disk_io_bo"塊每秒\033[0m"
echo ""
}
#(8)查看網絡流量
network_flow(){
# Show NIC throughput via `sar -n DEV` (sysstat package).
# NOTE(review): the install step assumes an RPM/yum based distro —
# confirm this matches the target hosts.
if ! rpm -q sysstat > /dev/null
then
yum install -y sysstat &>/dev/null
if [ $? -ne 0 ]
then
echo -e "\033[31msysstat 安裝失敗\033[0m"
exit 1
fi
fi
# Line 3 of the sar report is the column-header row (IFACE rxpck/s ...).
explain=$(sar -n DEV | sed -n '3p')
echo -e "\033[35m網絡流量使用情況: \n$explain\033[0m"
# BUG FIX: when several interfaces are up, the original captured a
# multi-line value and the later UNQUOTED grep broke. Take only the
# first up interface and quote the expansion.
network_ifs=$(ifconfig | grep "<UP,BROADCAST,RUNNING,MULTICAST>" | awk -F ':' '{print $1}' | head -1)
network_flow=$(sar -n DEV | grep -v Average | grep "$network_ifs" | tail)
echo -e "\033[36m$network_flow\033[0m"
}
#腳本運行入口
run(){
# Main loop: show the menu, read the user's choice (menu sets the global
# $num), then dispatch to the matching display function.
while true;do
menu
# Quote the selector so stray whitespace in the input cannot break case.
case "$num" in
"1")
#1、查看系統負載
load_average
;;
"2")
#2、查看CPU消耗%
cpu_use_percent
;;
"3")
#3、查看內存消耗%
mem_use_percent
;;
"4") #4、查看SWAP消耗%
swap_use_percent
;;
"5") #5、查看磁盤消耗%
disk_use_percent
;;
"6") #6、查看inode消耗%
inode_use_percent
;;
"7") #7、查看磁盤IO
disk_io
;;
"8") #8、查看網絡流量
network_flow
;;
"9") #9、一鍵查看所有情況
load_average
cpu_use_percent
mem_use_percent
swap_use_percent
disk_use_percent
inode_use_percent
disk_io
network_flow
exit 0
;;
"10") #10、退出腳本
exit 0
;;
*)
# Robustness: tell the user instead of silently re-prompting.
echo -e "\033[31m無效輸入,請輸入1-10\033[0m"
;;
esac
done
}
#調用腳本運行入口
run
十九、管理docker
具體要求如下:
1)腳本支持啟動全部容器、關閉全部容器、刪除全部容器;
2)需要提示用戶如何使用該腳本,需給出范例。
#!/bin/bash
#用於管理docker容器
# Manage ALL docker containers: start / stop / delete every container.
# Usage example: run the script and answer the prompt, e.g.
#   ./docker_manage.sh   ->   請輸入你要執行的操作:(stop/start/rm) stop
# Fixes vs the original:
#   - `while ture` typo: the validation loop body never ran, so an empty
#     answer was never re-prompted;
#   - missing `;;` between the `rm)` arm and `*)` was a syntax error;
#   - `docker ps -a | awk '{print $1}'` included the "CONTAINER" header
#     row in the id list — use `docker ps -aq` for bare ids.
while true
do
read -r -p "請輸入你要執行的操作:(stop/start/rm)" opt
if [ -z "$opt" ]
then
echo "請輸入你的操作"
continue
else
break
fi
done
# -q prints container ids only, one per line, no header.
docker ps -aq > /tmp/id.txt
case "$opt" in
stop)
for id in `cat /tmp/id.txt`
do
docker stop $id
done
;;
start)
for id in `cat /tmp/id.txt`
do
docker start $id
done
;;
rm)
for id in `cat /tmp/id.txt`
do
# Deletion is destructive — confirm each container individually.
read -r -p "將要刪除容器$id,是否繼續?(y|n)" c
case "$c" in
y|Y)
docker rm -f $id
;;
n|N)
echo "容器$id不會被刪除"
;;
*)
echo "你只能輸入 y/Y或者n/N"
;;
esac
done
;;
*)
echo "你只能輸入 start/stop/rm"
;;
esac
二十、shell多線程備份數據庫
本案例就是實現shell多線程備份數據庫,具體要求如下:
1)公司的業務量比較大,有100個數據庫需要全量備份,而每個數據庫的數據量高達幾十GB
(注意:每一個庫都為一個獨立的實例,即有着獨立的ip:port);
2)預估每一個庫的備份時間為30分鍾左右,要求在5個小時內完成;
3)假設100個庫的庫名、host、port以及配置文件路徑都存到一個文件里,文件名字為 /tmp/databases.list ;
4)格式為:db1 10.10.10.2 3308 /data/mysql/db1/my.cnf 。
#!/bin/bash
#多線程備份數據庫
#備份數據庫使用xtrabackup(由於涉及到myisam,命令為innobackupex)
# Everything the script prints (stdout and stderr) goes to the log file.
exec &> /tmp/mysql_bak.log
# BUG FIX: the original tested `which inoobackupex` (typo), so the tool
# was "missing" and re-installed on every run even when present. Also
# added -y: after the exec redirect above stdin is not interactive, so a
# yum confirmation prompt would hang or abort the run.
if ! which innobackupex &>/dev/null
then
echo "安裝xtrabackup工具"
yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm
yum install -y percona-xtrabackup-24
if [ $? -ne 0 ]
then
echo -e "\033[31m安裝xtrabackup工具出錯,請檢查\033[0m"
exit 1
fi
fi
# Backup destination and MySQL credentials used by bak_data().
bakdir=/data/backup/mysql
bakuser=vyNctM
bakpass=99omeaBHh
bak_data()
{
# Back up ONE database instance with innobackupex.
# Arguments: $1 db name, $2 host, $3 port, $4 path to that instance's my.cnf.
# Reads globals: bakdir, bakuser, bakpass.
local db_name=$1
local db_host=$2
local db_port=$3
local cnf=$4
[ -d "$bakdir/$db_name" ] || mkdir -p "$bakdir/$db_name"
# FIX: the original created $bakdir/$db_name but then wrote the backup
# into the shared $bakdir; target the per-database directory instead.
# Also use the named locals (the original assigned them, then kept using
# the bare positionals) and quote every expansion.
innobackupex --defaults-file="$cnf" --host="$db_host" --port="$db_port" --user="$bakuser" --password="$bakpass" --databases="$db_name" "$bakdir/$db_name"
if [ $? -ne 0 ]
then
echo -e "\033[31m備份數據庫${db_name}出現問題\033[0m"
fi
}
# Concurrency control: a named pipe used as a counting semaphore holding
# $n tokens. Each backup job consumes one token (read -u1000) before
# starting and returns it (echo >&1000) when finished, capping the number
# of simultaneous innobackupex processes at $n.
fifofile=/tmp/$$
mkfifo $fifofile
exec 1000<>$fifofile
n=10
for ((i=0;i<$n;i++))
do
echo >&1000 # seed the semaphore with 10 tokens = 10 concurrent jobs
done
# FIX: read the four whitespace-separated fields of each line of
# /tmp/databases.list (name host port my.cnf-path) directly, instead of
# re-splitting via `bak_data \`echo $line\`` — that form also performed
# unwanted glob expansion on the line. Redirecting the file into the
# loop replaces the useless `cat |` and its extra subshell.
while read -r db_name db_host db_port db_cnf
do
read -u1000
{
bak_data "$db_name" "$db_host" "$db_port" "$db_cnf"
echo >&1000 # return the token
} &
done < /tmp/databases.list
wait
exec 1000>&- # close fd 1000
rm -f "$fifofile" # remove the named pipe
腳本中,
- exec &> /tmp/mysql_bak.log ,將正確輸出和錯誤輸出都重定向到 /tmp/mysql_bak.log
- $$表示本進程PID,mkfifo命令創建命名管道
- read line ,line為變量名,將接下來的輸入賦值給line