1.編寫exporter
import prometheus_client
from prometheus_client import Gauge, start_http_server, Counter
import pycurl
import time
import threading
from io import BytesIO

# Prometheus Counter metrics exposed by this exporter.
# url_http_code        : count of probes per status-code class (1xx..5xx) per URL
# url_http_request_time: count of probes per latency bucket (<1s, <2s, <3s, +Inf) per URL
# http_request_total   : total number of probes per URL
url_http_code = Counter("url_http_code", "request http_code of the host", ['code', 'url'])
url_http_request_time = Counter("url_http_request_time", "request http_request_time of the host", ['le', 'url'])
http_request_total = Counter("http_request_total", "request request total of the host", ['url'])


def test_website(url):
    """Probe *url* once with pycurl and return (http_code, total_time_seconds).

    On any curl error (DNS failure, connect/transfer timeout, ...) returns
    (500, 999) so the failure lands in the 5xx / +Inf buckets.
    """
    buffer_curl = BytesIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    # Capture the body into a throwaway buffer; without WRITEDATA pycurl
    # dumps every response body to stdout, which is noise for an exporter.
    c.setopt(pycurl.WRITEDATA, buffer_curl)
    c.setopt(pycurl.CONNECTTIMEOUT, 3)
    c.setopt(pycurl.TIMEOUT, 3)
    try:
        c.perform()
    except pycurl.error:
        # NOTE: transport failures are deliberately conflated with a real
        # HTTP 500 — both count as a failed probe here.
        http_code = 500
        http_total_time = 999
    else:
        http_code = c.getinfo(pycurl.HTTP_CODE)
        http_total_time = c.getinfo(pycurl.TOTAL_TIME)
    finally:
        # Release the libcurl handle; the original leaked one per probe.
        c.close()
    return http_code, http_total_time


def count_metric(url):
    """Run one probe of *url* and update the three counters accordingly."""
    http_code, http_total_time = test_website(url)

    # Bucket the status code into its class; anything outside 100-499
    # (including curl failures mapped to 500) is counted as 5xx.
    if 100 <= http_code < 200:
        code_bucket = '1xx'
    elif 200 <= http_code < 300:
        code_bucket = '2xx'
    elif 300 <= http_code < 400:
        code_bucket = '3xx'
    elif 400 <= http_code < 500:
        code_bucket = '4xx'
    else:
        code_bucket = '5xx'
    url_http_code.labels(code_bucket, url).inc()

    # Histogram-style cumulative-looking buckets, hand-rolled as counters.
    if http_total_time < 1:
        url_http_request_time.labels('1', url).inc()
    elif http_total_time < 2:
        url_http_request_time.labels('2', url).inc()
    elif http_total_time < 3:
        url_http_request_time.labels('3', url).inc()
    else:
        url_http_request_time.labels('+Inf', url).inc()

    http_request_total.labels(url).inc()


def count_threads(url):
    """Fire a probe of *url* every 5 seconds, each in its own daemon thread
    so a slow probe never delays the schedule."""
    while True:
        t = threading.Thread(target=count_metric, args=(url,))
        # `daemon = True` replaces the deprecated setDaemon() call.
        t.daemon = True
        t.start()
        time.sleep(5)


if __name__ == '__main__':
    # Expose /metrics for Prometheus to scrape.
    start_http_server(9091)
    server_list = [
        'www.baidu.com',
        'www.qq.com',
        'blog.csdn.net',
        'github.com',
        'google.com',
    ]
    threads = []
    for url in server_list:
        t = threading.Thread(target=count_threads, args=(url,))
        t.daemon = True
        threads.append(t)
    # BUGFIX: start ALL monitor threads before joining any. The original
    # joined inside the start loop; since count_threads() never returns,
    # the first join blocked forever and only the first URL was monitored.
    for t in threads:
        t.start()
    for t in threads:
        t.join()
坑:prometheus不會提醒metrics的名字,要主動復制進去:curl http://10.0.0.111:19091/metrics
復制:https://blog.csdn.net/specter11235/article/details/87927202
--------------------------------------------------------------------------------------
計算exporter的metrics的比率
#自定義exporter-counter ##狀態碼是500的個數 url_http_code_total{code="5xx",url="10.0.0.111:55555/a.txt"} #訪問接口的個數 http_request_total{url="10.0.0.111:55555/a.txt"} ------------------------------------------------------------------ #錯誤率 delta(url_http_code_total{code="5xx",url="10.0.0.111:55555/a.txt"}[1m]) / on(url) group_left delta(http_request_total{url="10.0.0.111:55555/a.txt"}[1m]) #http code的每分鍾增長率,如果出現5xx,就說明有問題了 irate(http_request_total[1m]) #顯示期望時間的比例,比如只顯示小於1秒,占總次數的比例 delta(url_http_request_time_total{le='1'}[1m]) / on(url) group_left delta(http_request_total[1m]) #復制 https://blog.csdn.net/specter11235/article/details/87927202