xray 自动化


使用 xray 作为代理，请求 OTX 上找到的该域名的所有 URL，自动进行安全扫描

import threading
import requests
import json


def xray_proxy(sub_domain='', page_no_iter=''):
    """Worker thread: fetch pages of known URLs for *sub_domain* from the
    AlienVault OTX API and replay each URL through the xray proxy so that
    xray can passively scan it.

    Args:
        sub_domain: domain whose OTX ``url_list`` pages are fetched.
        page_no_iter: iterator shared by all workers, yielding 0-based page
            indexes; the worker returns once the iterator is exhausted.

    Relies on module-level globals ``proxies``, ``cert_path`` and
    ``xray_timeout`` defined in the ``__main__`` block.

    NOTE(review): ``next()`` on one iterator from many threads is not a
    documented-atomic operation — consider guarding it with a lock.
    """
    while True:
        # Draw the next page index; keep the try narrow so ONLY iterator
        # exhaustion ends the worker (previously any network/JSON error
        # killed the whole thread because it was not StopIteration).
        try:
            page_no = next(page_no_iter)
        except StopIteration:
            return
        try:
            res = requests.get(
                'https://otx.alienvault.com/otxapi/indicator/domain/url_list/'
                + sub_domain + '?limit=100&page=' + str(page_no + 1))
            url_list = json.loads(res.text)['url_list']
        except (requests.RequestException, ValueError, KeyError):
            # One bad page must not terminate the worker; move on.
            continue
        for url_json in url_list:
            print(url_json['url'])
            try:
                # NOTE(review): requests' ``verify`` expects a PEM bundle;
                # a .der file may fail TLS verification — confirm format.
                requests.get(url_json['url'], proxies=proxies,
                             verify=cert_path, timeout=xray_timeout)
            except requests.RequestException:
                # Target URLs are frequently dead; keep scanning the rest.
                continue


def scan_main(sub_domain, thread_num=100):
    """Scan every URL OTX knows for *sub_domain* through the xray proxy.

    Spawns *thread_num* worker threads that consume a shared page-index
    iterator and blocks until all of them finish.

    Args:
        sub_domain: domain to scan.
        thread_num: worker thread count (the work is I/O-bound, so a high
            default is intentional).
    """
    # First request only discovers how many URLs exist in total.
    res = json.loads(
        requests.request('GET', 'https://otx.alienvault.com/otxapi/indicator/domain/url_list/' + sub_domain +
                         '?limit=100&page=1').text)
    # ceil(actual_size / 100) pages. The old ``int(size / 100 + 1)``
    # requested one extra, empty page whenever size was an exact multiple
    # of 100 (and a useless page when size was 0).
    total = int(res['actual_size'])
    page_no_iter = iter(range(-(-total // 100)))

    all_threads = []
    for _ in range(thread_num):
        t = threading.Thread(target=xray_proxy, args=(sub_domain, page_no_iter))
        t.start()
        all_threads.append(t)
    # Wait for every worker so the caller knows the scan is complete.
    for t in all_threads:
        t.join()


if __name__ == '__main__':
    # Route every scan request through the local xray listener.
    # (Read as globals by xray_proxy — names must stay stable.)
    proxies = {
        "http": "http://127.0.0.1:8080",
        "https": "http://127.0.0.1:8080",
    }
    # xray CA certificate used for TLS verification of proxied requests.
    cert_path = 'D:/xray/cacert.der'
    # Per-request timeout (seconds) when replaying URLs through xray.
    xray_timeout = 10
    # Scan each target domain in turn.
    for subdomain in ['baidu.com', 'google.com']:
        scan_main(subdomain)

  

 


免责声明!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系本站邮箱yoyou2525@163.com删除。



 
粤ICP备18138465号  © 2018-2025 CODEPRJ.COM