Python Crawlers: Multiprocess and Multithread Crawling (Improving Crawl Efficiency)
import requests
from lxml import etree
from multiprocessing import Pool
from threading import Thread, Lock

# Request headers
headers = {
    'User-Agent': 'your_user_agent'
}

# List of URLs to crawl
urls = ['http://www.example.com/page{}'.format(i) for i in range(1, 11)]

# Lock protecting the shared results list used by the threaded version
lock = Lock()
results = []

# Fetch a page and return its decoded HTML
def crawl_page(url):
    response = requests.get(url, headers=headers)
    return response.content.decode('utf-8')

# Worker for the threaded version: a Thread's return value is discarded,
# so store the fetched page in the shared list under the lock instead
def crawl_page_worker(url):
    data = crawl_page(url)
    with lock:
        results.append(data)

# Parse the fetched HTML
def parse_content(data):
    html = etree.HTML(data)
    # Extract the elements you are interested in
    items = html.xpath('//your_xpath')
    for item in items:
        # Process each item here
        pass

# Multithreaded crawl: threads share memory, so pages are collected
# into the shared results list and parsed once all threads finish
def multi_threading_crawl(urls):
    threads = [Thread(target=crawl_page_worker, args=(url,)) for url in urls]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    for result in results:
        parse_content(result)

# Multiprocess crawl: Pool.map returns each worker's result directly
def multi_processing_crawl(urls):
    with Pool(processes=4) as pool:
        page_results = pool.map(crawl_page, urls)
    for result in page_results:
        parse_content(result)

# Guard the entry point so child processes do not re-run this module's
# top-level code under the spawn start method (Windows/macOS)
if __name__ == '__main__':
    multi_processing_crawl(urls)    # use multiple processes
    # multi_threading_crawl(urls)   # or use multiple threads
This example shows how to use Python's multiprocessing and threading libraries to crawl pages with multiple processes or multiple threads and so improve crawl throughput. In practice, you should choose a concurrency strategy based on the target site's robots.txt rules and the load its server can tolerate; a minimal robots.txt check is sketched below.
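For honoring robots.txt, the standard library's urllib.robotparser is a simple starting point. Below is a minimal sketch, assuming the example site exposes a robots.txt and reusing the urls list, crawl_page, and parse_content from above:

import time
from urllib.robotparser import RobotFileParser

rp = RobotFileParser()
rp.set_url('http://www.example.com/robots.txt')
rp.read()  # fetch and parse the site's robots.txt

# Keep only the URLs our user agent is allowed to fetch
allowed_urls = [url for url in urls if rp.can_fetch('your_user_agent', url)]

# A short pause between requests keeps the load on the server modest
for url in allowed_urls:
    parse_content(crawl_page(url))
    time.sleep(1)

If the site declares a Crawl-delay directive, rp.crawl_delay('your_user_agent') returns it, and you can use that value instead of a fixed one-second pause.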
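As a final note, if you would rather switch between threads and processes without maintaining two code paths, the standard library's concurrent.futures module (an alternative not used in the example above) exposes both behind the same Executor interface. A sketch reusing crawl_page and parse_content:

from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

def executor_crawl(urls, use_processes=False):
    # Threads suit I/O-bound fetching; processes help if parsing is CPU-bound
    executor_cls = ProcessPoolExecutor if use_processes else ThreadPoolExecutor
    with executor_cls(max_workers=4) as ex:
        # map() yields results in input order as the workers complete them
        for page in ex.map(crawl_page, urls):
            parse_content(page)

if __name__ == '__main__':
    executor_crawl(urls)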