《大众点评爬虫程序实战:爬取店铺展示信息》
import requests
from bs4 import BeautifulSoup
import pymysql
# Connect to the MySQL database (replace the placeholder credentials before running).
# NOTE(review): MySQL's 'utf8' charset is the 3-byte subset; 'utf8mb4' is needed for
# full Unicode (e.g. emoji in shop names) — confirm against the table's charset.
conn = pymysql.connect(host='localhost', user='your_username', password='your_password', db='dianping', charset='utf8')
# Module-level cursor shared by the crawl function below.
cursor = conn.cursor()
# Crawl a single shop's display information and persist it.
def crawl_shop_info(shop_url):
    """Fetch a Dianping shop detail page, parse its display fields, and
    insert one row into the ``shop_info`` table.

    Args:
        shop_url: Full URL of the shop detail page to scrape.

    Side effects:
        Executes a parameterized INSERT through the module-level ``cursor``
        and commits on the module-level ``conn``.

    Notes:
        Silently does nothing when the HTTP status is not 200 (best-effort).
        Raises IndexError if any expected CSS selector matches no element —
        callers should be prepared to handle parse failures.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
    }
    response = requests.get(shop_url, headers=headers)
    if response.status_code != 200:
        # Page did not load; skip it rather than insert partial data.
        return

    soup = BeautifulSoup(response.text, 'lxml')

    def first_text(selector):
        # Text of the first element matching *selector*, stripped of whitespace.
        return soup.select(selector)[0].text.strip()

    shop_name = first_text('.shop-name')            # shop name
    shop_address = first_text('.address')           # street address
    shop_phone = first_text('.phone')               # phone number
    shop_comment_count = first_text('.comment-count')  # number of reviews
    shop_score = first_text('.shop-score')          # rating
    shop_area = first_text('.area')                 # district/area
    shop_type = first_text('.tag')                  # category tag

    # Parameterized INSERT — scraped text never enters the SQL string directly.
    insert_sql = "INSERT INTO shop_info(shop_name, shop_address, shop_phone, shop_comment_count, shop_score, shop_area, shop_type) VALUES (%s, %s, %s, %s, %s, %s, %s)"
    cursor.execute(insert_sql, (shop_name, shop_address, shop_phone,
                                shop_comment_count, shop_score, shop_area, shop_type))
    conn.commit()
# Entry point controlling the crawler run.
def main():
    """Crawl one example shop page.

    Replace ``shop_url`` with the real shop URL you want to scrape.
    """
    shop_url = 'https://www.dianping.com/shop/1234567'  # example URL — substitute a real one
    crawl_shop_info(shop_url)


if __name__ == '__main__':
    main()
这段代码实现了一个简单的爬虫程序,用于从大众点评网站上爬取单个店铺的展示信息,并将这些信息插入到名为 dianping 的 MySQL 数据库中的 shop_info 表中。在实际应用中,你需要替换数据库连接信息,并确保数据库已经创建好相应的表。同时,你还需要处理可能出现的异常,例如网络请求失败、解析错误等。
评论已关闭