Scraping Lianjia Second-Hand Housing Data for Data Analysis
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time

# Request headers to mimic a regular browser visit
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

# List that accumulates the scraped house records
house_info_list = []

# Fetch the HTML of a page; return None if the request fails
def get_page_content(url):
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        return response.text
    return None

# Helper: return the text of the first element matching a CSS selector, or None
def select_text(soup, selector):
    elements = soup.select(selector)
    return elements[0].get_text(strip=True) if elements else None

# Parse one page into a record; the CSS class names below assume a particular
# page layout and may need updating if Lianjia changes its markup
def parse_page(content):
    soup = BeautifulSoup(content, 'lxml')
    house_info = {}
    # Listing title
    house_info['title'] = select_text(soup, '.houseInfo-title')
    # Listing price
    house_info['price'] = select_text(soup, '.price-det')
    # Listing address
    house_info['address'] = select_text(soup, '.house-address')
    # Listing description
    house_info['desc'] = select_text(soup, '.house-brief')
    return house_info

# Fetch and parse a single URL, appending the result to the list
def crawl_data(url):
    content = get_page_content(url)
    if content:
        house_info_list.append(parse_page(content))

# Crawl the first `page_num` listing pages of Shanghai second-hand homes
def crawl_house_data(page_num):
    for i in range(1, page_num + 1):
        url = f'https://shanghai.lianjia.com/ershoufang/pg{i}/'
        crawl_data(url)
        time.sleep(1)  # pause briefly between requests to be polite to the server

# Save the collected records to a CSV file
def save_to_csv(file_name):
    df = pd.DataFrame(house_info_list)
    df.to_csv(file_name, index=False, encoding='utf-8')

# Main entry point
def main():
    # Maximum number of pages to crawl
    max_page = 10
    crawl_house_data(max_page)
    save_to_csv('lianjia_shanghai_ershoufang.csv')

if __name__ == '__main__':
    main()
This code implements the basic functionality of scraping second-hand housing data for Shanghai from the Lianjia site. It first sets request headers to mimic a browser visit, then defines functions for fetching page content, parsing the page data, and saving the results to a CSV file. Finally, the main function sets a maximum number of pages to crawl and calls the crawling and saving functions to run the task. Note that the CSS selectors in parse_page are tied to the site's page layout and may need to be updated if Lianjia changes its markup. The example is simple and easy to follow, which makes it a reasonable starting point for learning web scraping.
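Since the title promises data analysis, here is a minimal follow-up sketch of what one might do with the saved file. It assumes the CSV produced above, with the columns title, price, address, and desc used as dictionary keys in the crawler, and it assumes the price field is scraped as a raw string containing a number (for example a value in units of 万); the price-cleaning step is an assumption and may need adjusting to the actual scraped text.

import pandas as pd

# Load the CSV produced by the crawler above.
df = pd.read_csv('lianjia_shanghai_ershoufang.csv')

# The 'price' column is scraped as raw text; extract the first number found
# in each string. This regex is an assumption about the scraped format.
df['price_num'] = (
    df['price']
    .astype(str)
    .str.extract(r'(\d+\.?\d*)', expand=False)
    .astype(float)
)

# Basic descriptive statistics of the listing prices.
print(df['price_num'].describe())

# Count listings per address to see which areas appear most often.
print(df['address'].value_counts().head(10))

From here the same DataFrame can feed further analysis, such as grouping by address and comparing average prices across areas.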