Python Web Scraping in Practice: A 58同城 (58.com) Job Scraper
import requests
from lxml import etree
import csv


def get_content(url):
    """Fetch the page and return its HTML text, or None on a non-200 response."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.text
    return None


def parse_content(html):
    """Yield one dict per job listing extracted from a result page."""
    html = etree.HTML(html)
    job_list = html.xpath('//ul[@class="item_con_list"]/li')
    for job in job_list:
        job_title = job.xpath('./div[1]/h3/a/@title')[0]
        job_link = job.xpath('./div[1]/h3/a/@href')[0]
        job_company = job.xpath('./div[2]/div[1]/a[1]/text()')[0]
        job_location = job.xpath('./div[2]/div[1]/span[1]/text()')[0]
        job_salary = job.xpath('./div[2]/div[2]/div[1]/text()')[0]
        job_info = job.xpath('./div[2]/div[2]/div[2]/p/text()')[0]
        job_info = job_info.strip().replace('\n', '').replace(' ', '')
        yield {
            'title': job_title,
            'link': job_link,
            'company': job_company,
            'location': job_location,
            'salary': job_salary,
            'info': job_info
        }


def save_to_csv(data):
    """Append one job record to 58_jobs.csv, writing the header row on first use."""
    with open('58_jobs.csv', 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=['title', 'link', 'company', 'location', 'salary', 'info'])
        if f.tell() == 0:  # empty file: write the header before the first row
            writer.writeheader()
        writer.writerow(data)


def main(offset):
    url = f'https://www.58.com/jobs/?&cl=1&cityId=489&offset={offset}'
    html = get_content(url)
    if html is None:  # request failed, skip this page
        return
    for job in parse_content(html):
        print(job)
        save_to_csv(job)


if __name__ == '__main__':
    for i in range(0, 90, 30):  # pagination offsets: 3 pages here; widen the range to fetch more
        main(i)
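Once the script has finished, a quick way to spot-check the output is to read 58_jobs.csv back with the same csv module. This is only a small verification sketch; it assumes the header row written by save_to_csv above is present.

import csv

# Read the scraped rows back and print a short summary of each.
with open('58_jobs.csv', newline='', encoding='utf-8') as f:
    reader = csv.DictReader(f)
    for i, row in enumerate(reader, start=1):
        print(i, row['title'], row['salary'], row['location'])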
The scraper above collects job listings from 58同城 and saves the scraped data to a CSV file. It uses the requests library to send HTTP requests, the lxml library to parse the HTML, and XPath expressions to extract the relevant elements. The extraction logic is encapsulated in the parse_content function, which returns a generator that efficiently yields one record per job; each record is then written to the CSV file by save_to_csv. Finally, the loop in the `if __name__ == '__main__'` block calls main with different pagination offsets, completing the multi-page scrape.
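To make the XPath idiom in parse_content easier to follow, the snippet below runs the same kind of relative expressions against a hand-written HTML fragment. The fragment is invented purely for illustration and only mimics the structure the scraper expects; the real 58同城 markup differs and changes over time, so the selectors usually need to be re-checked in the browser's developer tools.

from lxml import etree

# A made-up fragment that mirrors the structure parse_content assumes.
sample_html = '''
<ul class="item_con_list">
  <li>
    <div><h3><a title="Python Developer" href="https://example.com/job/1">Python Developer</a></h3></div>
    <div>
      <div><a>Example Co.</a><span>Beijing</span></div>
      <div><div>15k-25k</div><div><p> 3+ years of experience </p></div></div>
    </div>
  </li>
</ul>
'''

tree = etree.HTML(sample_html)
for li in tree.xpath('//ul[@class="item_con_list"]/li'):
    # Relative XPaths starting with './' are evaluated against each <li> node.
    title = li.xpath('./div[1]/h3/a/@title')[0]
    company = li.xpath('./div[2]/div[1]/a[1]/text()')[0]
    salary = li.xpath('./div[2]/div[2]/div[1]/text()')[0]
    print(title, company, salary)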