Scrapy第十一(①)篇:selenium4模拟器中间件
import scrapy
from scrapy.http import HtmlResponse
from scrapy_selenium import SeleniumMiddleware, SeleniumRequest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class MySpider(scrapy.Spider):
    """Spider whose start requests are rendered through a Selenium browser.

    Each start URL is wrapped in a ``SeleniumRequest`` so that the download
    is performed by a real browser (via the Selenium downloader middleware)
    instead of Scrapy's default HTTP client.
    """

    name = 'myspider'
    start_urls = ['http://example.com']

    def start_requests(self):
        # Yield SeleniumRequest (not scrapy.Request) so the Selenium
        # middleware intercepts these requests and drives the browser.
        # NOTE: the original snippet used SeleniumRequest without importing
        # it; the import is now declared at the top of the file.
        for url in self.start_urls:
            yield SeleniumRequest(url=url, callback=self.parse)

    def parse(self, response):
        # Parse the browser-rendered response content (placeholder).
        pass
class SeleniumMiddlewareCustom(SeleniumMiddleware):
    """Selenium downloader middleware with custom Chrome configuration.

    Extends scrapy_selenium's ``SeleniumMiddleware`` so that Chrome options
    and chromedriver service arguments can be supplied from the crawler's
    settings via :meth:`from_crawler`.
    """

    def __init__(self, driver_name='chrome', driver_executable_path=None, port=0,
                 chrome_options=None, service_args=None, proxy=None,
                 download_timeout=None, wait_until=None,
                 browser_profile=None):
        # The parent class creates and owns the webdriver instance.
        # The original code then built a *second* webdriver.Chrome and
        # overwrote self.driver, leaking the browser started by
        # super().__init__(); in addition, Selenium 4 removed the
        # ``service_args`` keyword from webdriver.Chrome, so that call
        # would raise TypeError.  Delegating alone is both correct and
        # leak-free.
        super().__init__(driver_name, driver_executable_path, port,
                         chrome_options, service_args, proxy,
                         download_timeout, wait_until,
                         browser_profile)

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor: build the middleware from crawler settings.

        :param crawler: the running Scrapy crawler, whose ``settings`` can
            supply browser configuration.
        :returns: a configured ``SeleniumMiddlewareCustom`` instance.
        """
        settings = crawler.settings  # NOTE(review): currently unused — wire real options from here.
        chrome_options = Options()
        # Log-path fixed: this middleware drives chromedriver, not
        # geckodriver (the original path was copy-pasted from a Firefox
        # example).
        service_args = ['--verbose', '--log-path=/tmp/chromedriver.log']
        # Additional settings could be read from ``settings`` here...
        return cls(chrome_options=chrome_options, service_args=service_args)
这个代码示例展示了如何创建一个自定义的Selenium中间件类,它继承自scrapy_selenium库(而非Scrapy本身)提供的SeleniumMiddleware。在这个类中,我们覆盖了__init__
方法,以便我们可以传递自定义的Chrome选项和服务参数。我们还覆盖了from_crawler
类方法,以便我们可以从Scrapy爬虫的设置中提取这些参数。这样,我们就可以在爬虫中使用这个自定义的Selenium中间件,并根据需要配置它。
评论已关闭