Python crawler: crawling subpages and writing the links to a text file
This is the utility class:
class UrlManager():
    """URL manager: keeps a frontier of new URLs and a set of already-crawled ones."""

    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled

    def add_new_url(self, url):
        if url is None or len(url) == 0:
            return
        if url in self.new_urls or url in self.old_urls:
            return  # skip duplicates
        self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def get_url(self):
        if self.has_new_url():
            url = self.new_urls.pop()
            self.old_urls.add(url)  # mark as crawled
            return url
        else:
            return None

    def has_new_url(self):
        return len(self.new_urls) > 0
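As a quick check that the manager behaves as intended, here is a minimal usage sketch (the example URLs are placeholders, not from the original script):

manager = UrlManager()
manager.add_new_url("https://example.com/a")
manager.add_new_urls(["https://example.com/a", "https://example.com/b"])  # duplicate "a" is ignored

while manager.has_new_url():
    print(manager.get_url())  # each URL comes back exactly once

print(manager.get_url())      # None: the frontier is empty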
This is the main script. How it works: you pass in a root (parent) page, the crawler extracts the links found on that page and puts them into Url_manager so the next page can be crawled in turn, and every matching link it finds is written to a text file as a record.
from utils import Url_manager
import requests
from bs4 import BeautifulSoup
import re
root_url = "https://xxxxxxx"

urls = Url_manager.UrlManager()
urls.add_new_url(root_url)
fout = open("OK.txt", "w")

while urls.has_new_url():
    curr_url = urls.get_url()
    r = requests.get(curr_url)
    if r.status_code != 200:
        print("error, return status_code is not 200", curr_url)
        continue
    soup = BeautifulSoup(r.text, "lxml")
    links = soup.find_all("a")
    for link in links:
        href = link.get("href")
        if href is None:
            continue  # check moved before href is used, so a missing href no longer raises TypeError
        newnew_url = curr_url + href
        # record links that match the full article-URL pattern
        pattern1 = r'https://xxxxx20\d{2}/\d{2}/\d{8}/[a-zA-Z0-9]\S'
        if re.match(pattern1, newnew_url):
            url_ = newnew_url[29:]  # strip the fixed-length site prefix
            fout.write("%s\n" % url_)
            fout.flush()
            print("success:%s\n" % url_)
        # queue date-style path segments (e.g. 2023/, 05/, 12345678/) for the next round
        pattern = r'20\d+/|\d{2}/|\d{8}/'
        if re.match(pattern, href):
            url = curr_url + href
            urls.add_new_url(url)
fout.close()
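Note that curr_url + href only works when every href is a relative path that extends the current URL. If you adapt this script to other sites, the standard-library urllib.parse.urljoin is a safer way to resolve links; a small sketch with placeholder URLs:

from urllib.parse import urljoin

base = "https://example.com/2023/05/"
print(urljoin(base, "12345678/post"))            # relative href is appended to the base path
print(urljoin(base, "/about"))                   # leading "/" resolves against the site root
print(urljoin(base, "https://other.example/x"))  # absolute href replaces the base entirely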