Python 爬虫实战----3(实力展现)
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Request headers: impersonate a desktop Chrome browser so simple
# anti-scraping checks treat us as a normal visitor.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/58.0.3029.110 Safari/537.3'
    ),
}
def get_html(url):
    """Download *url* and return its decoded HTML text.

    Returns None after printing the error when the request fails,
    so callers must check the result before parsing it.
    """
    try:
        # timeout keeps the script from hanging forever on a dead host
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        # pages often mislabel their charset; apparent_encoding sniffs
        # the real one from the response body
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as e:
        # narrow catch: only network/HTTP errors are expected here
        print(f"Error: {e}")
        return None
def parse_html(html):
    """Parse *html* and return table rows as a list of 4-column lists.

    Extracts the text of the first four <td> cells from every <tr>
    inside the page's first <tbody>. Returns an empty list when the
    page has no <tbody> (e.g. when the download failed upstream).
    """
    soup = BeautifulSoup(html, 'html.parser')
    data = []
    tbody = soup.find('tbody')
    if tbody is None:
        return data
    # find_all('tr') yields only Tag objects, so no isinstance check
    # against bs4.element.Tag is needed (the original referenced the
    # un-imported name `bs4`, which raised NameError at runtime)
    for tr in tbody.find_all('tr'):
        tds = tr.find_all('td')
        # skip malformed/short rows instead of raising IndexError
        if len(tds) >= 4:
            data.append([tds[0].text, tds[1].text, tds[2].text, tds[3].text])
    return data
def save_data(data, filename):
    """Write the scraped rows to *filename* as a UTF-8 CSV file.

    Each row in *data* must have four fields, mapped to the columns
    序号/姓名/年龄/职业; no index column is written.
    """
    columns = ['序号', '姓名', '年龄', '职业']
    frame = pd.DataFrame(data, columns=columns)
    frame.to_csv(filename, index=False, encoding='utf-8')
# Target page URL — replace with the real address before running.
url = 'https://www.example.com/data'

# Guard the driver so importing this module does not trigger a download.
if __name__ == '__main__':
    html = get_html(url)
    # get_html returns None on failure; parsing None would raise, so bail out
    if html is not None:
        data = parse_html(html)
        save_data(data, 'data.csv')
这段代码实现了上述功能:首先定义了请求头信息,模拟浏览器访问;然后定义了获取网页 HTML 内容的函数 get_html;接着定义了解析 HTML 并提取数据的函数 parse_html;最后定义了将数据保存到 CSV 文件的函数 save_data。代码示例中的网页 URL 应替换为实际的目标网址。
评论已关闭