[Python] Crawler Practice: Scraping Viewing-Habit Data of Douban Movie Commenters
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Request the URL and fetch the page content
def get_page_content(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.text
    return None

# Parse the page and extract the data
def parse_data(html):
    soup = BeautifulSoup(html, 'lxml')  # the 'lxml' parser requires the lxml package
    comments = soup.find_all('div', class_='comment')
    data = []
    for comment in comments:
        info = comment.find('div', class_='info')
        if info:
            user_name = info.find('a').text
            user_url = info.find('a')['href']
            habit_list = info.find_all('span', class_='user-habit')
            habit = ','.join([h.text for h in habit_list])
            data.append((user_name, user_url, habit))
    return data

# Save the data to a CSV file
def save_to_csv(data, file_name):
    df = pd.DataFrame(data, columns=['username', 'profile_url', 'viewing_habits'])
    df.to_csv(file_name, index=False, encoding='utf-8')

# Main function: fetch, parse, then save
def main(url):
    html = get_page_content(url)
    if html:
        data = parse_data(html)
        save_to_csv(data, 'data.csv')

if __name__ == '__main__':
    url = 'https://movie.douban.com/subject/1292720/comments?status=P'
    main(url)
This script scrapes user comments from a single Douban page. get_page_content sends the HTTP request and returns the page HTML; parse_data walks that HTML and extracts each commenter's username, profile URL, and viewing habits; save_to_csv writes the collected rows to a CSV file; and main wires the three steps together. Note that parse_data assumes habit tags appear as span elements with class user-habit; if the page's actual markup differs, the viewing_habits column will simply come out empty.
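One practical caveat: Douban rate-limits and sometimes blocks clients that request pages too quickly, so a bare requests.get call can fail intermittently. Below is a minimal sketch of a hardened fetch helper with a timeout, retries, and a back-off delay; the function name, retry count, and delay values are illustrative choices, not part of the original code.

import time
import requests

# Illustrative hardened variant of get_page_content (not from the
# original post): retries on errors and backs off between attempts.
def get_page_content_with_retry(url, retries=3, delay=2):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                return response.text
        except requests.RequestException:
            pass  # network error; fall through and retry
        time.sleep(delay)  # back off before the next attempt
    return None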
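The script as written also fetches only the first page of comments. Douban's comment listing paginates through start and limit query parameters, so covering more pages is a small loop over the existing helpers; the page count, page size, and per-request pause below are assumptions for illustration rather than values from the original post.

import time

# Illustrative multi-page driver built on get_page_content/parse_data;
# assumes Douban paginates comments with ?start=N&limit=20.
def scrape_pages(base_url, pages=5, page_size=20):
    all_rows = []
    for i in range(pages):
        url = f'{base_url}&start={i * page_size}&limit={page_size}'
        html = get_page_content(url)
        if not html:
            break  # stop on a failed fetch instead of hammering the site
        all_rows.extend(parse_data(html))
        time.sleep(1)  # be polite between requests
    return all_rows

# Example usage:
# rows = scrape_pages('https://movie.douban.com/subject/1292720/comments?status=P')
# save_to_csv(rows, 'data.csv')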