Python 爬虫项目实战:爬取某云热歌榜歌曲
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Request headers: a desktop-browser User-Agent so the site does not
# reject the request as an obvious script (basic anti-scraping bypass).
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
def get_html(url, timeout=10):
    """Fetch a page and return its body text, or None on failure.

    Args:
        url: The page URL to request.
        timeout: Seconds to wait for the server before giving up.
            New keyword with a default, so existing callers are unaffected.

    Returns:
        The response text on HTTP 200, otherwise None (also None on
        network errors, matching the function's "None on failure" contract).
    """
    try:
        # Without a timeout, requests.get can block forever on a dead server.
        response = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException:
        # Network/timeout errors previously propagated and crashed the
        # script; fold them into the existing None-on-failure contract.
        return None
    if response.status_code == 200:
        return response.text
    return None
def parse_html(html):
    """Extract the rank, song-name, and artist columns from the chart HTML.

    Args:
        html: Raw HTML of the hot-song chart page.

    Returns:
        A dict mapping the Chinese column labels to parallel lists of the
        tag texts, in page order.
    """
    soup = BeautifulSoup(html, 'lxml')
    # Each output column is driven by one CSS class on the page.
    column_classes = {
        '排名': 'num',
        '歌名': 'song-name',
        '艺人': 'singer-name',
    }
    return {
        label: [tag.get_text() for tag in soup.find_all(class_=css_class)]
        for label, css_class in column_classes.items()
    }
def save_data(data, filename):
    """Write the scraped columns to `<filename>.csv`.

    Args:
        data: Dict mapping column label -> list of cell values.
        filename: Output file name, without the '.csv' extension.
    """
    frame = pd.DataFrame(data)
    out_path = '{}.csv'.format(filename)
    # utf-8-sig prepends a BOM so Excel opens the Chinese text correctly.
    frame.to_csv(out_path, index=False, encoding='utf-8-sig')
def main():
    """Drive the scrape: download the chart page, parse it, save to CSV."""
    # NOTE(review): the original URL contained '/#/' — a URL fragment is
    # never sent to the server, so requests fetched the site homepage
    # instead of the chart. Dropping the fragment targets the chart page.
    url = 'https://music.163.com/discover/toplist?id=3778678'
    html = get_html(url)
    if html is None:
        # get_html returns None on failure; stop here instead of letting
        # parse_html crash on a None argument.
        print('Failed to fetch the chart page:', url)
        return
    data = parse_html(html)
    save_data(data, '云音乐热歌榜')


if __name__ == '__main__':
    main()
这段代码实现了获取某云音乐热歌榜的信息,提取了歌名、艺人和排名,并将其保存到CSV文件中。代码使用了requests库来发送HTTP请求,使用BeautifulSoup库来解析网页,使用pandas库来保存数据。同时,代码中加入了请求头来模拟浏览器访问,以避免被反爬机制阻止。
评论已关闭