# encoding:UTF-8
import xlsxwriter
import datetime
import pymysql
import numpy as np
import pandas

__author__ = 'sanjing'
__date__ = '2019/06/14'


averagesql = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: Average%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""

highsql = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: High%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""

warningsql = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: Warning%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""

informationsql = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: Information%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""

disastersql = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: Disaster%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""


# Connect to the MySQL database and return the query result as a tuple of
# (date string, count) rows
def get_count(sql):
    conn = pymysql.connect("xx.xx.xx.xx", "readonly", "xxxx", "zabbix", charset='utf8')
    cursor = conn.cursor()
    cursor.execute(sql)
    count = cursor.fetchall()
    cursor.close()
    conn.close()
    return count


def coloum(data, weekendtime):
    # Create the Excel workbook
    workbook = xlsxwriter.Workbook(weekendtime + ".xlsx")
    # Add a worksheet (Sheet1 by default)
    worksheet = workbook.add_worksheet()
    # Header row
    title = ['告警級別', '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日']
    # Row labels, one per severity level
    buname = ['information', 'warning', 'average', 'high', 'disaster']
    # Data cell format: 1px border
    formatter = workbook.add_format()
    formatter.set_border(1)
    # Header format: 1px border, grey background, centered and bold
    title_formatter = workbook.add_format()
    title_formatter.set_border(1)
    title_formatter.set_bg_color('#cccccc')
    title_formatter.set_align('center')
    title_formatter.set_bold()
    chart_col = workbook.add_chart({'type': 'column'})

    # Add one chart series per worksheet row; the series name comes from
    # column A of that row
    def chart_series(row):
        chart_col.add_series(
            {
                'categories': '=Sheet1!$B$1:$H$1',
                'values': '=Sheet1!$B${}:$H${}'.format(row, row),
                'line': {'color': 'black'},
                'name': '=Sheet1!$A${}'.format(row)
            }
        )

    # Write the header row, the severity labels and the alert counts into the
    # worksheet, using the formats defined above
    worksheet.write_row('A1', title, title_formatter)
    worksheet.write_column('A2', buname, formatter)
    # Write one row of counts per severity (rows 2 to 6) and register a chart
    # series for each row
    for i in range(2, 7):
        worksheet.write_row('B{}'.format(i), data[i - 2], formatter)
        chart_series(str(i))

    # Chart title and x/y axis labels
    chart_col.set_title({'name': '告警統計/周'})
    chart_col.set_x_axis({'name': '告警級別'})
    chart_col.set_y_axis({'name': '告警次數'})
    # Chart style
    # chart_col.set_style(37)

    # Insert the chart into the worksheet with a small offset
    worksheet.insert_chart('A10', chart_col, {'x_offset': 25, 'y_offset': 10})
    workbook.close()


# Take a dict of {date string: count} built from one query result, fill in a
# "0" entry for every day of the week that has no data, then return the counts
# ordered by date as a list of floats
def covertdata(jsondata, weektime):
    for i in weektime:
        j = i.strftime("%Y-%m-%d")
        if j not in jsondata:
            jsondata[j] = "0"
    # Sort the (date, count) pairs by date
    sort = sorted(jsondata.items(), key=lambda d: d[0])
    # Take the second column (the counts) and convert it to a list of floats
    array = np.array(sort)
    list2 = array[:, 1].tolist()
    return [float(x) for x in list2]


if __name__ == '__main__':
    yesterday = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    # The seven days ending yesterday
    weeklist = pandas.date_range(end=yesterday, periods=7)
    informationdata = get_count(informationsql)
    informationlist = covertdata(dict(informationdata), weeklist)
    warningdata = get_count(warningsql)
    warninglist = covertdata(dict(warningdata), weeklist)
    averagedata = get_count(averagesql)
    averagelist = covertdata(dict(averagedata), weeklist)
    highdata = get_count(highsql)
    highlist = covertdata(dict(highdata), weeklist)
    disasterdata = get_count(disastersql)
    disasterlist = covertdata(dict(disasterdata), weeklist)
    data1 = [informationlist, warninglist, averagelist, highlist, disasterlist]
    coloum(data1, yesterday)
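One note on the listing above: the five SQL statements differ only in the severity keyword matched inside message, so they could be generated from a single template. The sketch below is not part of the original script; SEVERITIES, SQL_TEMPLATE and queries are names introduced here purely for illustration, and it assumes the severity names match what the Zabbix media type writes into the alert text.

# Sketch: build the five per-severity queries from one template.
# The severity names are trusted constants (not user input), so plain string
# formatting is acceptable here.
SEVERITIES = ['Information', 'Warning', 'Average', 'High', 'Disaster']

SQL_TEMPLATE = """
select from_unixtime(clock,'%Y-%m-%d') as time,
count(DISTINCT eventid) as id
from alerts
WHERE mediatypeid = 5
AND message LIKE '%告警等級: {severity}%'
AND SUBJECT LIKE '%PROBLEM%'
AND clock BETWEEN UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 7 DAY)) AND UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 1 DAY))
group by time
order by time asc
"""

# e.g. queries['High'] replaces the hand-written highsql
queries = {s: SQL_TEMPLATE.format(severity=s) for s in SEVERITIES}

With that in place, the main block could loop over SEVERITIES and call get_count(queries[s]) instead of repeating the five query/convert pairs by hand.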
The resulting chart looks like this: