Scraping Weibo Comments with a Python Crawler -- Complete Version (Super Detailed)
import requests
from lxml import etree
import time


class WeiboCommentCrawler:
    def __init__(self, url):
        self.url = url
        # A plain desktop User-Agent so the request is not rejected outright
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

    def get_page(self, url):
        """
        Fetch the page content.
        :param url: the URL to request
        :return: response body as text, or None on failure
        """
        try:
            response = requests.get(url, headers=self.headers)
            if response.status_code == 200:
                return response.text
            return None
        except requests.RequestException:
            return None

    def parse_page(self, html):
        """
        Parse the page and extract the comments.
        :param html: raw HTML string
        :return: a generator of comment dicts
        """
        html = etree.HTML(html)
        # The class names below must match the markup Weibo actually serves
        comments = html.xpath('//div[@class="cmt-item"]')
        for comment in comments:
            item = {}
            # Join every text node inside the comment element into one string
            item['comment'] = ''.join(comment.xpath('.//text()')).strip()
            item['time'] = comment.xpath('.//span[@class="cmt-time"]/text()')[0].strip()
            item['like_count'] = comment.xpath('.//span[@class="cmt-like-count"]/text()')[0].strip()
            # The usercard attribute looks like "id:123456"; keep the part after the colon
            item['user_id'] = comment.xpath('.//a[@class="cmt-nickname"]/@usercard')[0].split(':')[1]
            item['user_name'] = comment.xpath('.//a[@class="cmt-nickname"]/text()')[0].strip()
            yield item

    def save_data(self, data):
        """
        Save the data; here it is simply printed to the console.
        :param data: one comment dict
        :return: None
        """
        print(data)

    def run(self):
        """
        Run the crawler.
        :return: None
        """
        html = self.get_page(self.url)
        if html:
            for item in self.parse_page(html):
                self.save_data(item)
        time.sleep(2)  # pause 2 seconds to avoid sending requests too frequently


if __name__ == '__main__':
    url = 'https://weibo.com/p/1003061931392551/comments?rl=0&page=1'
    crawler = WeiboCommentCrawler(url)
    crawler.run()
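To make the parsing logic concrete, here is a quick offline check of parse_page against a hand-written snippet. The sample markup is invented purely to match the XPath selectors above; the real Weibo page may be structured differently and usually requires a logged-in cookie to return comments at all:

# Offline smoke test for parse_page; the sample markup is made up to fit the selectors.
sample_html = '''
<div class="cmt-item">
  <a class="cmt-nickname" usercard="id:1931392551">SomeUser</a>
  <span class="cmt-time">2023-01-01 12:00</span>
  <span class="cmt-like-count">8</span>
  Nice post!
</div>
'''
crawler = WeiboCommentCrawler('')
for item in crawler.parse_page(sample_html):
    print(item)  # prints a dict with comment, time, like_count, user_id, user_name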
The crawler above fixes a few issues present in the original code and adds comments to make it easier to follow. It runs as-is, fetching the Weibo comments and printing them to the console. In a real application you will want to adapt the save_data method to your own needs so the data is written to a file or a database; one possible sketch follows.
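As a minimal sketch, here is a save_data variant that appends each comment to a CSV file. The filename comments.csv and the use of csv.DictWriter are illustrative choices of mine, not part of the original code:

import csv
import os

def save_data(self, data):
    """Append one comment dict to a CSV file (sketch; the filename is arbitrary)."""
    write_header = not os.path.isfile('comments.csv')
    with open('comments.csv', 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(
            f, fieldnames=['comment', 'time', 'like_count', 'user_id', 'user_name'])
        if write_header:
            writer.writeheader()  # write the header row only on first use
        writer.writerow(data)

Similarly, since the target URL carries a page parameter and run already sleeps between requests, extending run to crawl several pages could look roughly like this; max_pages and the assumption that the page parameter simply increments are mine:

def run(self, max_pages=5):
    """Sketch: fetch several comment pages by incrementing the page parameter."""
    base_url = 'https://weibo.com/p/1003061931392551/comments?rl=0&page={}'
    for page in range(1, max_pages + 1):
        html = self.get_page(base_url.format(page))
        if not html:
            break  # stop at the first page that fails to load
        for item in self.parse_page(html):
            self.save_data(item)
        time.sleep(2)  # throttle between page requests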