[Python Crawler] 5. Hands-On Scraping (Fetching Song Lyrics)
import os
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
 
# Configure the proxy server (the IP/port below are placeholders)
proxies = {
    'http': 'http://12.34.56.79:8070',
    'https': 'http://12.34.56.79:8070',
}
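
def check_proxy():
    # Illustrative helper, not part of the original post: httpbin.org echoes
    # back the client IP it sees, so this confirms that requests really go
    # through the proxy. The proxy address above (12.34.56.79:8070) is a
    # placeholder; substitute a working proxy before relying on this check.
    r = requests.get('https://httpbin.org/ip', proxies=proxies, timeout=5)
    print(r.json())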
 
def get_lyrics(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    res = requests.get(url, headers=headers, proxies=proxies)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'lxml')
    # Lyrics are usually wrapped in <div class="lyric-content">...</div>
    lyric_content = soup.find('div', class_='lyric-content')
    if lyric_content:
        lyrics = lyric_content.get_text()
        # Strip bracketed tags (e.g. LRC timestamps such as [00:12.34])
        lyrics = re.sub(r'\[.*?\]', '', lyrics)
        return lyrics
    return 'Lyrics not found'
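
# Illustration of the cleanup step above (added for clarity): LRC-style
# timestamp tags are removed while the lyric text itself is kept.
assert re.sub(r'\[.*?\]', '', '[00:12.34]hello world') == 'hello world'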
 
def main():
    df = pd.read_csv('music_data.csv', encoding='utf-8')
    # Make sure the output directory exists before writing lyric files
    os.makedirs('lyrics', exist_ok=True)
    for index, row in df.iterrows():
        url = row['url']
        try:
            print(f'Fetching: {url}')
            lyrics = get_lyrics(url)
            with open(f'lyrics/{index}.txt', 'w', encoding='utf-8') as f:
                f.write(lyrics)
        except Exception as e:
            print(f'Failed to fetch {url}: {e}')
 
if __name__ == '__main__':
    main()

This code fixes several issues in the original and adds exception handling, so the script does not abort when it hits a network problem or other error. It uses the recommended requests library to send HTTP requests and BeautifulSoup to parse the pages, and the proxy settings are correctly applied to every request.
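For reference, main() assumes music_data.csv contains at least a url column with one lyric-page URL per row. A minimal sketch of such a file, using placeholder example.com URLs rather than real song pages:

import pandas as pd

# Hypothetical input file for main(): one lyric-page URL per row.
# Replace the example.com URLs with real song pages before running the crawler.
pd.DataFrame({
    'url': [
        'https://example.com/song/1',
        'https://example.com/song/2',
    ]
}).to_csv('music_data.csv', index=False, encoding='utf-8')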