Python学习之路-爬虫提高:框架功能完善
import requests
from bs4 import BeautifulSoup
import re
import os
class DoubanCrawler:
    """Crawler for the Douban movie Top 250 list.

    Fetches the index page at ``start_url``, collects each movie's
    detail-page URL, then scrapes title, rating score, rating count and
    summary quote from every detail page.
    """

    def __init__(self, start_url):
        self.start_url = start_url
        self.headers = {
            'User-Agent': 'Mozilla/5.0',
            'Cookie': 'your_cookie_here'  # replace with your own cookie
        }
        # Detail-page URLs harvested from the index page.
        self.movie_details_urls = []
        # Parsed movie dicts (title / rating_score / rating_people / quote).
        self.movies = []

    def get_page_content(self, url):
        """Return the HTML text of *url* on HTTP 200, else None.

        A timeout is set so a stalled connection cannot hang the crawler;
        network errors are treated the same as a non-200 response.
        """
        try:
            # Original code had no timeout — requests would block forever.
            response = requests.get(url, headers=self.headers, timeout=10)
        except requests.RequestException:
            return None
        if response.status_code == 200:
            return response.text
        return None

    def parse_index_page(self, content):
        """Extract every movie's detail-page URL from the index HTML."""
        soup = BeautifulSoup(content, 'html.parser')
        movie_divs = soup.find_all('div', class_='item')
        for movie_div in movie_divs:
            # First <a> inside each item div links to the detail page.
            movie_details_url = movie_div.find('a')['href']
            self.movie_details_urls.append(movie_details_url)

    def parse_detail_page(self, content):
        """Parse one detail page and append a movie dict to self.movies."""
        soup = BeautifulSoup(content, 'html.parser')
        movie_title = soup.find('span', property='v:itemreviewed').text
        movie_rating_score = float(soup.find('strong', class_='ll rating_num').text)
        # Raw string for the regex (original '\d+' is an invalid escape
        # sequence on modern Python); guard against pages with no digits
        # in the rating block instead of raising IndexError.
        digits = re.findall(r'\d+', soup.find('div', class_='rating').text)
        movie_rating_people = int(digits[0]) if digits else 0
        movie_quote = soup.find('span', property='v:summary').text
        self.movies.append({
            'title': movie_title,
            'rating_score': movie_rating_score,
            'rating_people': movie_rating_people,
            'quote': movie_quote
        })

    def run(self):
        """Crawl index then every detail page; return the movie list."""
        index_content = self.get_page_content(self.start_url)
        if index_content:
            self.parse_index_page(index_content)
        for url in self.movie_details_urls:
            detail_content = self.get_page_content(url)
            if detail_content:
                self.parse_detail_page(detail_content)
        return self.movies
if __name__ == '__main__':
    # Crawl the Douban Top 250 index and dump the collected movie data.
    top250_url = 'https://movie.douban.com/top250'
    print(DoubanCrawler(top250_url).run())
这段代码实现了一个简单的基于Python的豆瓣电影TOP250爬虫。它首先通过请求库获取电影TOP250列表页的内容,然后使用BeautifulSoup进行页面解析以提取电影详情页的URL。接着,它遍历这些URL并获取每部电影的详细信息,包括电影名称、评分、评分人数和引言,最后将这些信息存储在一个列表中。
评论已关闭