Python crawler example: fetching 章若楠 (Zhang Ruonan)'s videos from Bilibili
import json
import re

import requests
 
# Fetch the list of videos uploaded by a Bilibili user.
# Assumption: this is the old getSubmitVideos AJAX interface, which returned JSON;
# it may have been deprecated or changed on the current site.
def get_video_page(user_id):
    url = f'https://space.bilibili.com/ajax/member/getSubmitVideos?mid={user_id}&pagesize=30&tid=0&page=1&keyword=&order=pubdate'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    response = requests.get(url, headers=headers)
    return response.text
 
# Parse the video list from the JSON text the interface returns.
# Assumption: the payload looks like {"data": {"vlist": [{"aid": ..., "title": ...}, ...]}},
# which was the shape of the old getSubmitVideos response.
def parse_video_info(text):
    data = json.loads(text)
    for item in (data.get('data') or {}).get('vlist', []):
        yield {
            'title': item['title'],
            'url': f"https://www.bilibili.com/video/av{item['aid']}"
        }
 
# Download a single video.
# Assumption: the watch page embeds a direct "video_url" field in its HTML;
# on the current site the stream address usually lives elsewhere (e.g. in the
# player's embedded JSON), so this pattern may need adjusting.
def download_video(video_url, video_title):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Referer': video_url
    }
    response = requests.get(video_url, headers=headers)
    matches = re.findall(r'"video_url":"(https://.*?)"', response.text)
    if not matches:
        print(f'No stream URL found for {video_title}, skipping')
        return
    video_data = requests.get(matches[0], headers=headers).content
    # Strip characters that are not allowed in file names before saving
    safe_title = re.sub(r'[\\/:*?"<>|]', '_', video_title)
    with open(f'{safe_title}.mp4', 'wb') as file:
        file.write(video_data)
 
# Main entry point: list the videos, then download each one
def main(user_id):
    resp_text = get_video_page(user_id)
    for video in parse_video_info(resp_text):
        print(video)
        download_video(video['url'], video['title'])
 
if __name__ == '__main__':
    user_id = 28376665  # 章若楠 (Zhang Ruonan)'s Bilibili UID
    main(user_id)

This code first defines get_video_page to fetch the list of videos a Bilibili user has uploaded, then parse_video_info to extract each video's title and URL from that list, then download_video to save each video locally, and finally main, which calls these functions to fetch and download 章若楠's videos. The example shows how to combine requests, JSON parsing, and regular expressions to scrape and process web data.
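Since the interface URL above already carries page and pagesize parameters, the listing can also be walked page by page instead of stopping at the first 30 entries. The sketch below assumes the same old getSubmitVideos interface and the same JSON shape as above (a data.vlist array whose items carry aid and title); it is an illustration under those assumptions, not a guaranteed-current API.

# Sketch: iterate over every page of a user's uploads.
# Assumes the old getSubmitVideos interface and its JSON layout
# ({"data": {"vlist": [...]}}); adjust if the interface has changed.
import requests

def iter_all_videos(user_id, pagesize=30):
    headers = {'User-Agent': 'Mozilla/5.0'}
    page = 1
    while True:
        url = (f'https://space.bilibili.com/ajax/member/getSubmitVideos'
               f'?mid={user_id}&pagesize={pagesize}&tid=0&page={page}&order=pubdate')
        data = requests.get(url, headers=headers, timeout=10).json()
        vlist = (data.get('data') or {}).get('vlist', [])
        if not vlist:  # an empty page means we are past the last page
            break
        for item in vlist:
            yield {'title': item['title'],
                   'url': f"https://www.bilibili.com/video/av{item['aid']}"}
        page += 1

# Usage example: for video in iter_all_videos(28376665): print(video)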