Python 3.x ADPool执行器存在问题\u wait \u for \u tstate\u lock python(线程死锁?)
我与ThreadPoolExecutor有问题。它开始时很强劲,然后减速到最后停止。我不明白我做错了什么,我已经尝试将ThreadPoolExecutor部分移动到domains\u循环之外,只是不使用domains\u循环,但它做了同样的事情 将maxworkers更改为5只会使它早些时候冻结,因此我知道我一定是做错了与线程数量无关的事情 文件读取、url连接和文件写入工作正常,似乎是异步http请求被破坏了 有趣的是,如果我有更少的子域,它仍然会偶尔锁定Python 3.x ADPool执行器存在问题\u wait \u for \u tstate\u lock python(线程死锁?),python-3.x,python-multithreading,Python 3.x,Python Multithreading,我与ThreadPoolExecutor有问题。它开始时很强劲,然后减速到最后停止。我不明白我做错了什么,我已经尝试将ThreadPoolExecutor部分移动到domains\u循环之外,只是不使用domains\u循环,但它做了同样的事情 将maxworkers更改为5只会使它早些时候冻结,因此我知道我一定是做错了与线程数量无关的事情 文件读取、url连接和文件写入工作正常,似乎是异步http请求被破坏了 有趣的是,如果我有更少的子域,它仍然会偶尔锁定 sub_file = [ '
sub_file = [
'mail.myresmed.com',
'www.resmed.com',
'bcg29k.2163007t.resmed.com',
'account.resmed.com',
'account-uat.resmed.com',
'www.account-uat.resmed.com',
'adfs.resmed.com',
'admin-mysleep.resm'
]
dir_file = ['/.git', '/test', '/manage', '/login']
subfile_iterator = [0]
dirfile_iterator = [0]
subfile_readstack = []
dirfile_readstack = [""] #first element is blank so the base url will be fetched
domains_list = []
results_list = []
sleep_inc = 0.0001
stack_size = 100
#browser_list = []
results = []
'''
***************************************************************************************************************************************************************************
FILE FNs
***************************************************************************************************************************************************************************
'''
async def write_to_file(results_list):
file = open('results.txt', 'a')
print("Writing to log")
for result in results_list:
#print("writing...\n")
#print(result.headers)
#file.write("{}\n\n".format(result.headers))
headers = result.headers
cookiejar = result.cookies
cookies = cookiejar.items()
file.write("\n\n")
file.write("***************************************************************************************************************************************\n")
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write(" {} \n".format(result.url))
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write("- status: {}\n".format(result.status_code))
file.write("- reason: {}\n".format(result.reason))
#file.write("- is redirect? {}\n".format(result.is_redirect))
#if result.is_redirect:
# file.write("is permanent redirect? {}\n".format(result.is_permanent.redirect))
file.write("\n- headers: \n")
for key,value in headers.items():
file.write("\t{keys}: {values}\n".format(keys=key, values=value))
file.write("\n- cookies: \n")
for cookie in cookies:
file.write("\t{}\n".format(cookie))
result_bytes = result.content
html_formatted = result_bytes.decode('utf-8')
soup = bs(html_formatted, "html.parser")
file.write("\n----------------------\n")
file.write("- style tags: \n")
file.write("----------------------\n\n")
for tags in soup.find_all('style'):
#prettify the css
file.write("{}\n\n".format(tags))
file.write("\n----------------------\n")
file.write("- script tags: \n")
file.write("----------------------\n\n")
for tags in soup.find_all('script'):
#prettify the javascript
file.write("{}\n\n".format(tags))
file.write("\n----------------------\n")
file.write("- links: \n")
file.write("----------------------\n\n")
for tags in soup.find_all('a'):
#prettify the javascript
file.write("{}\n\n".format(tags))
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write("---------------------------------------------------------------------------------------------------------------------------------------\n")
file.write("***************************************************************************************************************************************\n")
file.write("\n")
file.close()
def files_exist(subfile, dirfile):
if os.path.isfile(subfile):
subfile_exist = True
else:
print('sub_file does not exit')
if os.path.isfile(dirfile):
dirfile_exist = True
else:
print('dir_file does not exit')
if subfile_exist and dirfile_exist:
return True
else:
sys.exit()
async def read_from_file(list_file, file_lines, read_stack, file_iterator):
global sleep_inc
if file_iterator[-1] >= file_lines -1:
return
if len(read_stack) < stack_size -1:
with open(list_file) as f:
for i in range(1, file_lines+1):
file_iterator.append(i)
line = linecache.getline(list_file, i, module_globals=None).strip()
if len(line) > 0:
print("reading: {}".format(line))
read_stack.append(line)
await asyncio.sleep(sleep_inc)
if i == stack_size:
await asyncio.sleep(sleep_inc)
else:
await asyncio.sleep(sleep_inc)
async def get_lines(list_file):
with open(list_file) as f:
f.seek(0) #ensure you're at the start of the file..
first_char = f.read(1) #get the first character
if not first_char:
print("FAIL: the sub or dir files (or both) are empty") #first character is the empty string..
sys.exit()
else:
f.seek(0) #f
for i, l in enumerate(f):
await asyncio.sleep(sleep_inc)
pass
return i + 1
async def file_lines():
global sub_file
global dir_file
#global subfile_lines
#global dirfile_lines
if files_exist(sub_file, dir_file):
print("Reading files... ")
subfile_lines = files_read_loop.create_task(get_lines(sub_file))
dirfile_lines = files_read_loop.create_task(get_lines(dir_file))
await asyncio.wait([subfile_lines, dirfile_lines])
return (subfile_lines, dirfile_lines)
async def load_files():
global sub_file
global dir_file
global subfile_iterator
global dirfile_iterator
global subfile_readstack
global dirfile_readstack
(subfile_lines, dirfile_lines) = await file_lines()
read_from_sub_file = files_read_loop.create_task(read_from_file(sub_file, subfile_lines.result(), subfile_readstack, subfile_iterator))
read_from_dir_file = files_read_loop.create_task(read_from_file(dir_file, dirfile_lines.result(), dirfile_readstack, dirfile_iterator))
concat_sub_to_dir = files_read_loop.create_task(concat_addr(subfile_readstack, dirfile_readstack))
await asyncio.wait([read_from_sub_file, read_from_dir_file, concat_sub_to_dir])
async def write_log():
global results
print("write_log")
ret = files_write_loop.create_task(write_to_file(results))
'''
***************************************************************************************************************************************************************************
URL FNs
***************************************************************************************************************************************************************************
'''
async def concat_addr(subread, dirread):
global results_list
global domains_list
global sleep_inc
global subfile_readstack
global dirfile_readstack
global subfile_lines
global dirfile_lines
domains_list_size = len(domains_list)
if domains_list_size < stack_size -1:
for i, j in enumerate(subfile_readstack):
for j, k in enumerate(dirfile_readstack):
domains_list.insert(0, subfile_readstack[i] + dirfile_readstack[j])
print("adding: {subf}{dirf} to domains_list".format(subf=subfile_readstack[i], dirf=dirfile_readstack[j]))
await asyncio.sleep(sleep_inc)
else:
await asyncio.sleep(sleep_inc)
def fetch(session, url):
FQDM = "https://{domain}?".format(domain=url)
try:
fresh_agent = user_agents.swap()
custom_header = {'user-agent': fresh_agent}
with session.get(FQDM, headers=custom_header) as response:
status = response.status_code
url = response.url
print(f"=== {status} - {url}")
results.append(response)
return response
except:
print(f"Server at {url} not found")
finally:
pass
async def get(domains):
global results
with ThreadPoolExecutor(max_workers=50) as executor:
with requests.Session() as session:
loop = asyncio.get_event_loop()
print('''\n\n
------------------------
RESULTS
------------------------
\n
''')
for url in domains:
loop.run_in_executor( executor, fetch, *(session, url))
return True
async def iterate_domains():
global results
global domains_list
ret = domains_loop.create_task(get(domains_list))
'''
***************************************************************************************************************************************************************************
MAIN
***************************************************************************************************************************************************************************
'''
if __name__ == "__main__":
try:
#file_sema = asyncio.BoundedSemaphore(value=10)
files_read_loop = asyncio.get_event_loop()
files_read_loop.run_until_complete(load_files())
domains_loop = asyncio.get_event_loop()
domains_loop.set_debug(1)
domains_loop.run_until_complete(iterate_domains())
files_write_loop = asyncio.get_event_loop()
files_write_loop.run_until_complete(write_log())
except Exception as e:
print("****** EXCEPTION: {} ".format(e))
pass
finally:
files_read_loop.close()
domains_loop.close()
files_write_loop.close()
sub_文件=[
'mail.myresmed.com',
“www.resmed.com”,
'bcg29k.2163007t.resmed.com',
“account.resmed.com”,
“account uat.resmed.com”,
“www.account-uat.resmed.com”,
"adfs.resmed.com",,
'admin mysleep.resm'
]
目录文件=['/.git'、'/test'、'/manage'、'/login']
子文件_迭代器=[0]
dirfile_迭代器=[0]
子文件_readstack=[]
dirfile_readstack=[“”]#第一个元素为空,因此将获取基本url
域\u列表=[]
结果列表=[]
睡眠时间=0.0001
堆栈大小=100
#浏览器列表=[]
结果=[]
'''
***************************************************************************************************************************************************************************
文件FNs
***************************************************************************************************************************************************************************
'''
异步def写入文件(结果列表):
文件=打开('results.txt','a')
打印(“写入日志”)
对于结果列表中的结果:
#打印(“正在写入…\n”)
#打印(结果标题)
#file.write(“{}\n\n”.format(result.headers))
headers=result.headers
cookiejar=result.cookies
cookies=cookiejar.items()
file.write(“\n\n”)
文件。写入(“***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
file.write(“{}\n”.format(result.url))
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
file.write(“-status:{}\n”.format(result.status\u code))
file.write(“-reason:{}\n”.format(result.reason))
#file.write(“-is redirect?{}\n.”格式(result.is\u redirect))
#如果result.is_重定向:
#file.write(“is permanent redirect?{}\n.”格式(result.is_permanent.redirect))
file.write(“\n-标题:\n”)
对于键,标头中的值。项()
file.write(“\t{keys}:{values}\n”.format(keys=key,values=value))
file.write(“\n-cookies:\n”)
对于cookie中的cookie:
file.write(“\t{}\n”.format(cookie))
result\u bytes=result.content
html\u formatted=result\u bytes.decode('utf-8')
soup=bs(html\u格式的“html.parser”)
file.write(“\n-------------------\n”)
file.write(“-style标记:\n”)
file.write(“-------------------------\n\n”)
用于汤中的标签。查找所有('style'):
#美化css
file.write(“{}\n\n”.format(标记))
file.write(“\n-------------------\n”)
file.write(“-script标记:\n”)
file.write(“-------------------------\n\n”)
对于soup.find_all('script')中的标记:
#美化javascript
file.write(“{}\n\n”.format(标记))
file.write(“\n-------------------\n”)
file.write(“-links:\n”)
file.write(“-------------------------\n\n”)
用于汤中的标签。查找所有('a'):
#美化javascript
file.write(“{}\n\n”.format(标记))
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
file.write(“-----------------------------------------------------------------------------------------------------------------------------------------\n”)
文件。写入(“***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
文件。写入(“\n”)
file.close()文件
def文件\u存在(子文件,目录文件):
如果os.path.isfile(子文件):
子文件_exist=True
其他:
打印('子_文件不退出')
如果os.path.isfile(dirfile):
dirfile_exist=True
其他:
打印('目录文件不退出')
如果存在子文件和目录文件:
返回真值
其他:
sys.exit()
异步定义从文件读取(列出文件、文件行、读取堆栈、文件迭代器):
全球睡眠公司
如果文件迭代器[-1]>=文件行-1:
返回
如果len(读取堆栈)<堆栈大小-1:
打开(列表文件)为f时:
对于范围内的i(1,文件行+1):
文件_iterator.append(i)
line=linecache.getline(list_文件,i,module_globals=None).strip()
如果len(线)>0:
打印(“读取:{}”。格式(行))
read_stack.append(行)
等待异步睡眠
def fetch(session, url):
FQDM = "https://{domain}?".format(domain=url)
try:
fresh_agent = user_agents.swap()
custom_header = {'user-agent': fresh_agent}
with session.get(FQDM, headers=custom_header, timeout=X) as response: <<<<<--------
status = response.status_code
url = response.url
print(f"=== {status} - {url}")
results.append(response)
return response
except:
print(f"Server at {url} not found")
finally:
pass