import requests
from bs4 import BeautifulSoup
from collections import deque
from urllib.parse import urljoin


class SimpleCrawler:
    def __init__(self, seed_url):
        self.url_queue = deque([seed_url])
        self.seen_urls = {seed_url}  # mark the seed so it is never re-queued

    def get_page(self, url):
        # Fetch a page and return its HTML, or None on any failure.
        try:
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                return response.text
        except requests.exceptions.RequestException:
            pass
        return None

    def parse_page(self, url, html_content):
        # Extract every link, resolve relative URLs against the current
        # page, and enqueue any absolute URL we have not seen before.
        soup = BeautifulSoup(html_content, 'html.parser')
        for link in soup.find_all('a'):
            href = link.get('href')
            if not href:
                continue
            new_url = urljoin(url, href)
            if new_url.startswith(('http://', 'https://')) and new_url not in self.seen_urls:
                self.url_queue.append(new_url)
                self.seen_urls.add(new_url)

    def crawl(self):
        # Breadth-first traversal: take URLs from the front of the queue.
        while self.url_queue:
            url = self.url_queue.popleft()
            print(f'Crawling: {url}')
            html_content = self.get_page(url)
            if html_content:
                self.parse_page(url, html_content)


if __name__ == '__main__':
    crawler = SimpleCrawler('http://example.com')
    crawler.crawl()
This simple crawler starts from a given seed URL, fetches each page with requests.get, and uses BeautifulSoup to parse the links it contains. For every page it parses, it inspects all links, resolves relative URLs against the current page with urljoin, appends new URLs to the queue, and records which URLs have already been seen, which prevents infinite loops and duplicate crawls. This small example shows how to do web crawling in Python and is a good foundation for learning crawler development.
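
In practice an unbounded crawl like this can run for a very long time. As a minimal sketch of one way to cap it, the subclass below adds a hypothetical max_pages parameter (not part of the original SimpleCrawler) and stops once that many URLs have been attempted:

# A minimal sketch of a page budget; max_pages is an assumed
# extension, not part of the original class.
class BoundedCrawler(SimpleCrawler):
    def __init__(self, seed_url, max_pages=50):
        super().__init__(seed_url)
        self.max_pages = max_pages

    def crawl(self):
        attempted = 0
        # Stop when the queue is empty or the page budget is spent.
        while self.url_queue and attempted < self.max_pages:
            url = self.url_queue.popleft()
            attempted += 1
            print(f'Crawling: {url}')
            html_content = self.get_page(url)
            if html_content:
                self.parse_page(url, html_content)

Counting attempts rather than successful fetches keeps the loop bounded even when many requests fail.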