Python Course Project: Web Scraper + Visualization + Data Analysis + Database (Scraper Part)
import requests
from bs4 import BeautifulSoup
import pymysql
import pandas as pd
# Connect to the database
conn = pymysql.connect(host='localhost', user='your_username', password='your_password', db='job_db', charset='utf8')
cursor = conn.cursor()
# Create the table, dropping any existing one from a previous run
cursor.execute("DROP TABLE IF EXISTS job_info")
cursor.execute("CREATE TABLE job_info(id INT PRIMARY KEY AUTO_INCREMENT, title VARCHAR(255), company VARCHAR(255), salary VARCHAR(255), city VARCHAR(255), experience VARCHAR(255), education VARCHAR(255), type VARCHAR(255), create_time VARCHAR(255), url VARCHAR(255))")
# The page to scrape (a Lagou job-search results page)
url = 'https://www.lagou.com/jobs/list_%E8%BD%AF%E4%BB%B6%E7%BC%96%E7%A8%8B%E5%B8%88?labelWords=label&fromSearch=true&suginput='
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
}
# Send the request (a timeout keeps a hung connection from stalling the script)
response = requests.get(url, headers=headers, timeout=10)
# Parse the page
soup = BeautifulSoup(response.text, 'lxml')
# Extract the job cards (class names must match the live page's markup)
job_list = soup.find_all('div', class_='job-primary')
# Store the data
for job in job_list:
    title = job.find('div', class_='name').text
    company = job.find('div', class_='company-text').text.strip()
    salary = job.find('div', class_='money').text
    city = job.find('div', class_='address').text
    info = job.find('div', class_='li_com_tag').text
    experience_education = info.split('|')
    experience = experience_education[0].strip()
    education = experience_education[1].strip()
    job_type = job.find('div', class_='positionType').text  # renamed: `type` shadows the built-in
    create_time = job.find('div', class_='pubTime').text
    job_url = 'https://www.lagou.com' + job.find('a', class_='position_link')['href']  # renamed: plain `url` would overwrite the list-page URL
    # Insert into the database using a parameterized query
    cursor.execute("INSERT INTO job_info(title, company, salary, city, experience, education, `type`, create_time, url) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (title, company, salary, city, experience, education, job_type, create_time, job_url))
    conn.commit()
# Close the database connection
cursor.close()
conn.close()
This version replaces string-formatted SQL with a parameterized INSERT, which closes the SQL injection hole in the original code and is the safer way to handle database writes. The loop body is now properly indented, and variables such as `job_type` and `job_url` no longer shadow the built-in `type` or clobber the list-page `url`, in line with Python naming conventions.
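To make the injection point concrete, here is a minimal contrast sketch (using `cursor` and a scraped `title` from the script above; the single-column INSERT is just for illustration). Splicing the value into the SQL text lets a stray quote in the page content break the statement, while passing it as a parameter lets pymysql escape it first:

# Unsafe: a quote character inside `title` terminates the SQL string early.
# cursor.execute("INSERT INTO job_info(title) VALUES ('%s')" % title)

# Safe: the driver escapes the value before it reaches the SQL text.
cursor.execute("INSERT INTO job_info(title) VALUES (%s)", (title,))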
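The script imports pandas but never uses it; presumably it belongs to the visualization and analysis parts of the project. As a bridge to those parts, here is a minimal sketch, assuming the same `job_db` credentials as above, that reads the scraped table back into a DataFrame:

import pandas as pd
import pymysql

# Reopen the connection and load the scraped rows for analysis.
conn = pymysql.connect(host='localhost', user='your_username',
                       password='your_password', db='job_db', charset='utf8')
df = pd.read_sql('SELECT title, company, salary, city, experience, education FROM job_info', conn)
conn.close()

print(df.head())                   # quick sanity check of the scraped data
print(df['city'].value_counts())   # e.g. job counts per city, ready for a bar chart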