Analyzing and Fixing Out-of-Order Results in a Python Multithreaded Crawler
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urljoin


def save_result(result):
    # Persist a crawled result; here we simply print it.
    print(result)


def crawl_page(url):
    response = requests.get(url, timeout=10)
    if response.status_code != 200:
        # Return an empty list so the caller can flatten results safely.
        return []
    soup = BeautifulSoup(response.text, 'html.parser')
    # Suppose we want to extract every link on the page.
    links = soup.find_all('a', href=True)
    return [urljoin(url, link['href']) for link in links]


def main():
    # URLs to crawl
    urls = ['http://example.com/page1', 'http://example.com/page2']
    # Create a thread pool with ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=5) as executor:
        # Submit one crawl task per URL
        futures = [executor.submit(crawl_page, url) for url in urls]
        # Wait for all tasks, collecting results in submission order
        results = [future.result() for future in futures]
        # Merge the per-page result lists into a single flat list
        all_results = [item for sublist in results for item in sublist]
        # Save the merged results
        for result in all_results:
            save_result(result)


if __name__ == '__main__':
    main()
This code uses concurrent.futures.ThreadPoolExecutor to crawl pages in multiple threads and shows how to merge the results afterwards. Because future.result() is called on the futures in the order they were submitted, the merged list stays aligned with the original URL list even though the pages are fetched concurrently, which is exactly what avoids the out-of-order results this article is about. In practice, you can adapt the save_result function to store or process the crawled data however you need.
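By contrast, if you collect results with concurrent.futures.as_completed, the futures arrive in completion order rather than submission order, which is where the scrambled output usually comes from. Below is a minimal sketch of one way to keep the output aligned with the input URLs in that case; it reuses the crawl_page function defined above, and the index-based mapping is just one possible approach rather than the only solution.

from concurrent.futures import ThreadPoolExecutor, as_completed

def crawl_in_order(urls):
    # Pre-allocate one slot per URL so each result lands at its original index.
    ordered = [None] * len(urls)
    with ThreadPoolExecutor(max_workers=5) as executor:
        # Remember which input index each future belongs to.
        future_to_index = {
            executor.submit(crawl_page, url): i for i, url in enumerate(urls)
        }
        # as_completed yields futures as they finish, in completion order,
        # so we use the recorded index to put each result back in place.
        for future in as_completed(future_to_index):
            ordered[future_to_index[future]] = future.result()
    return ordered

An even simpler option is executor.map(crawl_page, urls), which returns results in the same order as urls regardless of which thread finishes first.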