Scrapy Crawler Framework Case Study, Part 5: Scraping JD Book Information via the Selenium Middleware Technique
import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


class JdBookSpider(scrapy.Spider):
    name = 'jd_book'
    allowed_domains = ['jd.com']
    start_urls = ['https://book.jd.com/booksort.html']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One shared browser instance for the whole crawl.
        self.driver = webdriver.Chrome()

    def parse(self, response):
        # Render the category page in the browser so JavaScript-built links exist.
        self.driver.get(response.url)
        categories = self.driver.find_elements(By.CSS_SELECTOR, '.mc .name a')
        # Collect the URLs first: once a later callback navigates the driver away,
        # these elements become stale and get_attribute() would raise.
        urls = [category.get_attribute('href') for category in categories]
        for url in urls:
            yield scrapy.Request(url, callback=self.parse_category)

    def parse_category(self, response):
        self.driver.get(response.url)
        # Wait up to 10 seconds for the product list to be rendered.
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.gl-item'))
        )
        items = self.driver.find_elements(By.CSS_SELECTOR, '.gl-item')
        for item in items:
            book_info = {
                'name': item.find_element(By.CSS_SELECTOR, '.p-name a').text,
                'price': item.find_element(By.CSS_SELECTOR, '.p-price strong').text,
                'comment_num': item.find_element(By.CSS_SELECTOR, '.p-commit a').text,
                'shop_name': item.find_element(By.CSS_SELECTOR, '.p-shop a').text,
            }
            yield book_info

    def close(self, reason):
        # quit() ends the browser session; close() would only close the current window.
        self.driver.quit()
This example uses Selenium's WebDriver to handle JavaScript-rendered content. It first visits the book category page through Selenium, collects the URLs of all book categories, and creates a crawl request for each one. In the parse_category callback, it uses Selenium to load the book list, waiting for the items to appear, and locates each book's fields with CSS selectors. Finally, when the spider closes, the close method is called to shut down the WebDriver. The example shows how to combine Scrapy and Selenium to handle dynamically loaded content.
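One thing worth noting: the spider above drives Selenium directly from its callbacks, so each page is effectively fetched twice (once by Scrapy's downloader, once by the browser). The "middleware technique" mentioned in the title is usually implemented as a Scrapy downloader middleware that fetches the page with the browser and hands the rendered HTML back to the spider. Below is a minimal sketch of that approach; the class name SeleniumMiddleware and the module path myproject.middlewares are illustrative, not part of the original code.

# middlewares.py -- a minimal sketch, not the original post's code
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


class SeleniumMiddleware:
    """Fetches pages with a real browser and returns rendered HTML to Scrapy."""

    def __init__(self):
        self.driver = webdriver.Chrome()

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls()
        # Shut the browser down when the spider finishes.
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def process_request(self, request, spider):
        # Render the page in the browser instead of Scrapy's downloader.
        self.driver.get(request.url)
        # A real middleware would wait for page-specific content;
        # 'body' here is just a placeholder condition.
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, 'body'))
        )
        # Returning an HtmlResponse short-circuits the normal download; the
        # spider's callbacks then parse the rendered HTML with Scrapy selectors.
        return HtmlResponse(
            url=request.url,
            body=self.driver.page_source,
            encoding='utf-8',
            request=request,
        )

    def spider_closed(self, spider):
        self.driver.quit()

To enable it, register the middleware in settings.py (the priority 543 is just a conventional slot for downloader middlewares):

# settings.py
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.SeleniumMiddleware': 543,
}

With this in place the spider needs no driver of its own: its callbacks receive fully rendered HTML and can extract the same fields with ordinary selectors such as response.css('.gl-item').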