A Shell Script for Collecting and Analyzing Linux Server Performance Logs

     Recently my boss asked me to analyze our servers' performance data and track down the performance bottlenecks. It took two days, and the result is two scripts: one generates the logs and the other extracts the data, producing CSV output that can be loaded into Excel to build reports. I also picked up a fair amount of shell programming along the way.

Collecting the performance data: the script below takes a sample roughly every 3 seconds (each of the two sar calls samples for 1 second, plus a 1-second sleep). Just run it in the background, as shown after the listing.

#!/bin/sh

while :
do
	iostat -x -t >> /var/log/jciostat.log		# extended disk I/O statistics, with timestamps
	vmstat -t -S M >> /var/log/jcvmstat.log		# process/memory statistics in MB, with timestamps
	free -g >> /var/log/jcfree_g.log		# memory and swap usage in GB
	top -b -n 1 | head -5 >> /var/log/jctop.log	# load average and task/CPU summary
	sar -P ALL 1 1 | grep : | grep all | cut -d: -f2 >> /var/log/jccpu.log		# overall CPU utilization
	sar -n DEV 1 1 | grep : | cut -d: -f2 >> /var/log/jcnetwork.log			# per-interface network statistics

	if [ -f "/var/log/jciostat.log" ];then
		if [ $(stat -c "%s" /var/log/jciostat.log) -gt $((100*1024*1024)) ];then
			# log file has grown past 100MB: archive all six logs, then truncate them
			cd /var/log/ >/dev/null 2>&1
			tar czvf jc.log.tar.gz jciostat.log jcvmstat.log jcfree_g.log jctop.log jccpu.log jcnetwork.log > /dev/null 2>&1
			echo "" > /var/log/jciostat.log
			echo "" > /var/log/jcvmstat.log
			echo "" > /var/log/jcfree_g.log
			echo "" > /var/log/jctop.log
			echo "" > /var/log/jccpu.log
			echo "" > /var/log/jcnetwork.log
			cd - > /dev/null 2>&1
		fi
	fi
	sleep 1		# with the two 1-second sar samples above, each iteration takes about 3 seconds
done
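
Start it in the background so it keeps sampling after you log out, for example (assuming the script above is saved as collect.sh; the file name is illustrative):

nohup ./collect.sh > /dev/null 2>&1 &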




The log analysis script: it extracts the requested columns from the collected logs into per-metric CSV files, then merges them into a single result.csv keyed by timestamp.

#!/bin/sh

print_help()
{
echo "use age: analyz.sh  -day <day> -start <start time> -end <end time> -<option1> <colum1,colum2...> -<option2> <colum1,colum2...> -<option3> <colum1,colum2...>"
echo "day: YYYY-MM-DD"
echo "start time:HH:MM:SS"
echo "end time:HH:MM:SS"
echo "                   1    2        3       4       5       6        7        8         9         10     11     12     13  14  15  16  17"
echo "-vmstat:           r    b        swpd    free    buff    cache    si       so        bi        bo     in     cs     us  sy  id  wa  st"
echo "-sda:                   rrqm/s   wrqm/s  r/s     w/s     rsec/s   wsec/s   avgrq-sz  avgqu-sz  await  svctm  %util"
echo "-sdb:                   rrqm/s   wrqm/s  r/s     w/s     rsec/s   wsec/s   avgrq-sz  avgqu-sz  await  svctm  %util"
echo "-network                rxpck/s  txpck/s rxkB/s  txkB/s  rxcmp/s  txcmp/s  rxmcst/s"
echo "-cpu                    us       ni      sy      wa      st       id"
echo "-mem:                   total    used    free    shared  buffers  cached"
echo "-swap:                  total    used    free"
echo "-la(load average): 5min 10min    15min"
echo "-network <netdev:[cloudbr0/bond0/eth0...]> <colum1,colum2...>"
echo "example:$0 -sda 1,2,3 -sdb 10,11,12 -network cloudbr0 2,3,4 -swap 3,4 -day 2016-07-08 -start 07:00:00 -end 08:00:00"
}

# work on local copies of the logs so the collector can keep appending
cp /var/log/jc*.log ./

day=""
start=""
end=""

vmstat=""
sda=""
sdb=""
mem=""
swap=""
la=""
cpu=""
network=""
netdev=""
while [ -n "$1" ]
do  
case "$1" in   
	"-vmstat")
        vmstat=$2
        shift
        ;;  
    "-sda")  
        sda=$2
        shift
        ;;  
    "-sdb")  
        sdb=$2
        shift
        ;;  
    "-mem")  
        mem=$2
        shift
        ;;
    "-swap")  
        swap=$2
        shift
        ;; 
    "-la")  
        la=$2
        shift
        ;;
    "-day")
        day=$2
        shift
        ;;
    "-start")
        start=$2
        shift
        ;;
    "-end")
        end=$2
        shift
        ;;
    "-cpu")
        cpu=$2
        shift
        ;;
    "-network")
        netdev=$2
        network=$3
        shift
        shift
        ;;
    "--help")
		print_help
		exit 0
        ;;
     *)
      echo "$1 is not an option"
      ;;
esac
shift
done
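
# (The long options above are parsed by hand because the shell's getopts builtin
# only supports single-character flags.)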

# Step 1: generate a separate CSV file for each requested metric
if [ ! -z "$vmstat" ];then
	colum_name=("CST" "vmstat_r" "vmstat_b" "vmstat_swpd" "vmstat_free" "vmstat_buff" "vmstat_cache" "vmstat_si" "vmstat_so" "vmstat_bi" "vmstat_bo" "vmstat_in" "vmstat_cs" "vmstat_us" "vmstat_sy" "vmstat_id" "vmstat_wa" "vmstat_st")
	OLD_IFS="$IFS"
	IFS=","
	colums=($vmstat)
	IFS="$OLD_IFS"
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]}
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > vmstat.csv1
	
	# gawk '{print $o_colum}' cannot see the shell variable $o_colum inside single quotes,
	# so the command is written into a temporary script (expanded there by the shell) and executed
	echo '#!/bin/sh' > vmstat.sh
	echo "grep ${colum_name[0]} jcvmstat.log | gawk '{print $o_colum}' >> vmstat.csv1" >> vmstat.sh
	chmod u+x vmstat.sh
	./vmstat.sh
	rm -rf vmstat.sh
fi
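
# Note: an alternative sketch (untested) that avoids the temporary script is to
# double-quote the gawk program so the shell expands $o_colum in place, e.g.:
#     grep "${colum_name[0]}" jcvmstat.log | gawk "{print $o_colum}" >> vmstat.csv1
# The same would apply to the sda/sdb/mem/swap/cpu/network blocks below.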

if [ ! -z "$sda" ];then
	colum_name=("sda" "" "sda_rrqm/s" "sda_wrqm/s" "sda_r/s" "sda_w/s" "sda_rsec/s" "sda_wsec/s" "sda_avgrq-sz" "sda_avgqu-sz" "sda_await" "sda_svctm" "sda_%util")
	OLD_IFS="$IFS" 
	IFS="," 
	colums=($sda) 
	IFS="$OLD_IFS" 
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > sda_io.csv1
	
	# same single-quote limitation as above: expand $o_colum via a temporary script
	echo '#!/bin/sh' > sda.sh
	echo "grep ${colum_name[0]} jciostat.log | gawk '{print $o_colum}' >> sda_io.csv1" >> sda.sh
	chmod u+x sda.sh
	./sda.sh
	rm -rf sda.sh
fi

if [ ! -z "$sdb" ];then
	colum_name=("sdb" "" "sdb_rrqm/s" "sdb_wrqm/s" "sdb_r/s" "sdb_w/s" "sdb_rsec/s" "sdb_wsec/s" "sdb_avgrq-sz" "sdb_avgqu-sz" "sdb_await" "sdb_svctm" "sdb_%util")
	OLD_IFS="$IFS" 
	IFS="," 
	colums=($sdb) 
	IFS="$OLD_IFS" 
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > sdb_io.csv1
	
	# same single-quote limitation as above: expand $o_colum via a temporary script
	echo '#!/bin/sh' > sdb.sh
	echo "grep ${colum_name[0]} jciostat.log | gawk '{print $o_colum}' >> sdb_io.csv1" >> sdb.sh
	chmod u+x sdb.sh
	./sdb.sh
	rm -rf sdb.sh
fi

if [ ! -z "$mem" ];then
	colum_name=("Mem" "" "mem_total" "mem_used" "mem_free" "shared" "buffers" "cached")
	OLD_IFS="$IFS" 
	IFS="," 
	colums=($mem) 
	IFS="$OLD_IFS" 
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > mem_used.csv1
	
	echo '#!/bin/sh' > mem.sh
	echo "grep ${colum_name[0]} jcfree_g.log | gawk '{print $o_colum}' >> mem_used.csv1" >> mem.sh
	chmod u+x mem.sh
	./mem.sh
	rm -rf mem.sh
fi

if [ ! -z "$swap" ];then
	colum_name=("Swap" "" "swap_total" "swap_used" "swap_free")
	OLD_IFS="$IFS" 
	IFS="," 
	colums=($swap) 
	IFS="$OLD_IFS"
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > swap_used.csv1
	echo '#!/bin/sh' > swap.sh
	echo "grep ${colum_name[0]} jcfree_g.log | gawk '{print $o_colum}' >> swap_used.csv1" >> swap.sh
	chmod u+x swap.sh
	./swap.sh
	rm -rf swap.sh
fi

if [ ! -z "$la" ];then
	colum_name=("load average" "load_1min" "load_5min" "load_15min")
	OLD_IFS="$IFS"
	IFS="," 
	colums=($la)
	IFS="$OLD_IFS"
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c	# no "," added here: the cut output below leaves a comma attached to each field
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > load.csv1
	
	echo '#!/bin/sh' > la.sh
	echo "grep \"${colum_name[0]}\" jctop.log | cut -d, -f3,4,5 | cut -d: -f2 | gawk '{print $o_colum}'>> load.csv1" >> la.sh
	chmod u+x la.sh
	./la.sh
	rm -rf la.sh
fi

if [ ! -z "$cpu" ];then
	colum_name=("all" "" "us" "ni" "sy" "wa" "st" "id")
	OLD_IFS="$IFS"
	IFS="," 
	colums=($cpu)
	IFS="$OLD_IFS"
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > cpu.csv1
	
	echo '#!/bin/sh' > cpu.sh
	echo "grep \"${colum_name[0]}\" jccpu.log | gawk '{print $o_colum}'>> cpu.csv1" >> cpu.sh
	chmod u+x cpu.sh
	./cpu.sh
	rm -rf cpu.sh
fi

if [ ! -z "$network" ];then
	colum_name=("" "" "rxpck/s" "txpck/s" "rxkB/s" "txkB/s" "rxcmp/s" "txcmp/s" "rxmcst/s")
	OLD_IFS="$IFS"
	IFS="," 
	colums=($network)
	IFS="$OLD_IFS"
	o_colum=""
	o_colum_name=""
	for c in ${colums[@]} 
	do
		if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
			continue
		fi
		o_colum=${o_colum}\$$c\",\"
		o_colum_name=${o_colum_name}${colum_name[$c]}"_"${netdev},
	done
	o_colum=${o_colum%\"}
	o_colum=${o_colum%,}
	o_colum=${o_colum%\"}
	o_colum_name=${o_colum_name%,}
	echo $o_colum_name > network.csv1
	
	echo '#!/bin/sh' > network.sh
	echo "grep \"$netdev\" jcnetwork.log | gawk '{print $o_colum}'>> network.csv1" >> network.sh
	chmod u+x network.sh
	./network.sh
	rm -rf network.sh
fi

# extract the timestamps (with vmstat -t, date and time are fields 18 and 19)
echo time > time.csv1
grep "CST" jcvmstat.log | gawk '{print $18"/"$19}' >> time.csv1

# Step 2: merge the per-metric CSV files into one table
i=0 # index of the tmp .csv2 file being written
j=0 # index of the previous tmp .csv2 file
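# The gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0}' idiom used below joins two
# files line by line: while reading the first file (NR==FNR), every line is cached
# in array a; for each line of the second file it then prints the cached line, a
# comma, and the current line -- the same effect as 'paste -d, file1 file2'.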
csv_files=`ls *.csv1|grep -v "time.csv1"`
for f in $csv_files
do
	# a trailing comma may remain at the end of each line; strip it
	sed -i 's/,$//g' $f
	
	if [ $i -eq 0 ];then  # first
		gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' time.csv1 $f > tmp$j.csv2
		i=$(($i+1))
	else # not first
		gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp$j.csv2 $f > tmp$i.csv2
		i=$(($i+1))
		j=$(($j+1))
	fi
done

i=$(($i-1))
mv tmp$i.csv2  result.csv
sed -i 's/time/    /g' result.csv	# blank out the "time" header cell


if [ ! -z "$day" ];then
	date_str=`echo $day | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}'`
	if [ ! -z "$date_str" ];then
		head -1 result.csv > $date_str.csv
		grep $date_str result.csv >> $date_str.csv
		sed -i 's/ //g' $date_str.csv
		if [ ! -z "$start" ] && [ ! -z "$end" ];then
			st=`echo $start | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}'`
			et=`echo $end | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}'`
			if [ ! -z "$st" ] && [ ! -z "$et" ];then
				stn=`echo $st|sed 's/://g'`
				etn=`echo $et|sed 's/://g'`
				filename=${date_str}-${stn}-${etn}.csv
				head -1 $date_str.csv > $filename
				# spaces were stripped above, so word splitting iterates line by line
				lines=`cat $date_str.csv`
				for line in $lines
				do
					# compare timestamps numerically as HHMMSS
					ctn=`echo $line | cut -d',' -f1|cut -d'/' -f2|sed 's/://g'`
					[ -z "$ctn" ] && continue	# skip the header line
					if [ `expr $ctn + 0` -gt `expr $stn + 0` ] && [ `expr $ctn + 0` -lt `expr $etn + 0` ];then
						echo $line >> $filename
					fi
				done
			else
				echo "Time format error. Please input HH:MM:SS"
			fi
		fi
	else
		echo "Date format error. Please input YYYY-MM-DD"
	fi
fi
# clean up intermediate files and the copied logs
rm -rf *.csv1
rm -rf *.csv2
rm -rf jc*.log

To extract, for July 8, 2016 between 07:00 and 08:00, memory used and cached, swap used and free, and the %util of disk sda, run:

./analyz.sh -swap 3,4 -sda 12  -mem 3,7 -day 2016-07-08 -start 07:00:00 -end 08:00:00
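
For that invocation the merged CSV should begin with a header roughly like the one below (the per-metric files are merged in the alphabetical order ls lists them, and the leading time column is left unlabeled):

,mem_used,cached,sda_%util,swap_used,swap_free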

Open the generated CSV file in Excel and use its charting features to plot the performance curves.

