Python Scraping: A Review of the Next-Generation HTTP Library httpx and the parsel Parsing Library
Reading this article takes about 8 minutes.
The requests + BeautifulSoup combination
The target URL is https://sh.lianjia.com/ershoufang/pudong/a3p5/. We first send a request to get the maximum page number, then loop over the pages, parsing each one to extract the fields we want (neighborhood name, floor, orientation, total price, unit price, and so on), and finally export everything to a CSV file. If you are reading this article, you presumably already have some grounding in Python scraping, so we will not explain every line of code.

# homelink_requests.py
# Author: 大江狗
from fake_useragent import UserAgent
import requests
from bs4 import BeautifulSoup
import csv
import re
import time


class HomeLinkSpider(object):
    def __init__(self):
        self.ua = UserAgent()
        self.headers = {"User-Agent": self.ua.random}
        self.data = list()
        self.path = "浦东_三房_500_800万.csv"
        self.url = "https://sh.lianjia.com/ershoufang/pudong/a3p5/"

    def get_max_page(self):
        response = requests.get(self.url, headers=self.headers)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            a = soup.select('div[class="page-box house-lst-page-box"]')
            # Use eval to turn the page-data string into a dict
            max_page = eval(a[0].attrs["page-data"])["totalPage"]
            return max_page
        else:
            print("请求失败 status:{}".format(response.status_code))
            return None

    def parse_page(self):
        max_page = self.get_max_page()
        for i in range(1, max_page + 1):
            url = 'https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
            response = requests.get(url, headers=self.headers)
            soup = BeautifulSoup(response.text, 'html.parser')
            ul = soup.find_all("ul", class_="sellListContent")
            li_list = ul[0].select("li")
            for li in li_list:
                detail = dict()
                detail['title'] = li.select('div[class="title"]')[0].get_text()

                # e.g. 2室1厅 | 74.14平米 | 南 | 精装 | 高楼层(共6层) | 1999年建 | 板楼
                house_info = li.select('div[class="houseInfo"]')[0].get_text()
                house_info_list = house_info.split(" | ")

                detail['bedroom'] = house_info_list[0]
                detail['area'] = house_info_list[1]
                detail['direction'] = house_info_list[2]

                floor_pattern = re.compile(r'\d{1,2}')
                # Match from any position in the string
                match1 = re.search(floor_pattern, house_info_list[4])
                if match1:
                    detail['floor'] = match1.group()
                else:
                    detail['floor'] = "未知"

                # Match the build year
                year_pattern = re.compile(r'\d{4}')
                match2 = re.search(year_pattern, house_info_list[5])
                if match2:
                    detail['year'] = match2.group()
                else:
                    detail['year'] = "未知"

                # e.g. 文兰小区 - 塘桥: extract the neighborhood name and location
                position_info = li.select('div[class="positionInfo"]')[0].get_text().split(' - ')
                detail['house'] = position_info[0]
                detail['location'] = position_info[1]

                # e.g. 650万 -> matches 650
                price_pattern = re.compile(r'\d+')
                total_price = li.select('div[class="totalPrice"]')[0].get_text()
                detail['total_price'] = re.search(price_pattern, total_price).group()

                # e.g. 单价64182元/平米 -> matches 64182
                unit_price = li.select('div[class="unitPrice"]')[0].get_text()
                detail['unit_price'] = re.search(price_pattern, unit_price).group()
                self.data.append(detail)

    def write_csv_file(self):
        head = ["标题", "小区", "房厅", "面积", "朝向", "楼层", "年份",
                "位置", "总价(万)", "单价(元/平方米)"]
        keys = ["title", "house", "bedroom", "area", "direction",
                "floor", "year", "location", "total_price", "unit_price"]
        try:
            with open(self.path, 'w', newline='', encoding='utf_8_sig') as csv_file:
                writer = csv.writer(csv_file, dialect='excel')
                if head is not None:
                    writer.writerow(head)
                for item in self.data:
                    row_data = []
                    for k in keys:
                        row_data.append(item[k])
                    writer.writerow(row_data)
            print("Write a CSV file to path %s Successful." % self.path)
        except Exception as e:
            print("Fail to write CSV to path: %s, Case: %s" % (self.path, e))


if __name__ == '__main__':
    start = time.time()
    home_link_spider = HomeLinkSpider()
    home_link_spider.parse_page()
    home_link_spider.write_csv_file()
    end = time.time()
    print("耗时:{}秒".format(end - start))
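A side note on get_max_page: the paging div stores its state in a page-data attribute holding a JSON-like string, which eval turns into a dict. A minimal sketch with a made-up attribute value (the real value depends on how many listings there are); json.loads handles the same string without executing arbitrary code:

import json

# Hypothetical page-data value, for illustration only
page_data = '{"totalPage":86,"curPage":1}'

eval(page_data)["totalPage"]        # 86 - what the spider above does
json.loads(page_data)["totalPage"]  # 86 - same result, safer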
The requests + parsel combination
# BeautifulSoup usage
from bs4 import BeautifulSoup
soup = BeautifulSoup(response.text, 'html.parser')
ul = soup.find_all("ul", class_="sellListContent")[0]

# Parsel usage: the Selector class
from parsel import Selector
selector = Selector(response.text)
ul = selector.css('ul.sellListContent')[0]

# Parsel: getting text or attribute values
selector.css('div.title span::text').get()
selector.css('ul li a::attr(href)').get()

>>> for li in selector.css('ul > li'):
...     print(li.xpath('.//@href').get())
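As a quick reference for the next point, here is a self-contained sketch of how get() and getall() differ (the HTML snippet is made up for illustration):

from parsel import Selector

html = '<ul><li><a href="/a">A</a></li><li><a href="/b">B</a></li></ul>'
selector = Selector(text=html)

selector.css('li a::text').get()           # 'A' - first match only
selector.css('li a::text').getall()        # ['A', 'B'] - all matches
selector.css('li a::attr(href)').getall()  # ['/a', '/b']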
In recent versions of parsel, the old extract() and extract_first() methods for getting text or attribute values have been replaced by getall() and get() respectively. Here is the full parsel version of the spider:

# homelink_parsel.py
# Author: 大江狗
from fake_useragent import UserAgent
import requests
import csv
import re
import time
from parsel import Selector


class HomeLinkSpider(object):
    def __init__(self):
        self.ua = UserAgent()
        self.headers = {"User-Agent": self.ua.random}
        self.data = list()
        self.path = "浦东_三房_500_800万.csv"
        self.url = "https://sh.lianjia.com/ershoufang/pudong/a3p5/"

    def get_max_page(self):
        response = requests.get(self.url, headers=self.headers)
        if response.status_code == 200:
            # Create a Selector instance
            selector = Selector(response.text)
            # Use a CSS selector to get the max-page div box
            a = selector.css('div[class="page-box house-lst-page-box"]')
            # Use eval to turn the page-data JSON string into a dict
            max_page = eval(a[0].xpath('//@page-data').get())["totalPage"]
            print("最大页码数:{}".format(max_page))
            return max_page
        else:
            print("请求失败 status:{}".format(response.status_code))
            return None

    def parse_page(self):
        max_page = self.get_max_page()
        for i in range(1, max_page + 1):
            url = 'https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
            response = requests.get(url, headers=self.headers)
            selector = Selector(response.text)
            ul = selector.css('ul.sellListContent')[0]
            li_list = ul.css('li')
            for li in li_list:
                detail = dict()
                detail['title'] = li.css('div.title a::text').get()

                # e.g. 2室1厅 | 74.14平米 | 南 | 精装 | 高楼层(共6层) | 1999年建 | 板楼
                house_info = li.css('div.houseInfo::text').get()
                house_info_list = house_info.split(" | ")

                detail['bedroom'] = house_info_list[0]
                detail['area'] = house_info_list[1]
                detail['direction'] = house_info_list[2]

                floor_pattern = re.compile(r'\d{1,2}')
                match1 = re.search(floor_pattern, house_info_list[4])  # match from any position
                if match1:
                    detail['floor'] = match1.group()
                else:
                    detail['floor'] = "未知"

                # Match the build year
                year_pattern = re.compile(r'\d{4}')
                match2 = re.search(year_pattern, house_info_list[5])
                if match2:
                    detail['year'] = match2.group()
                else:
                    detail['year'] = "未知"

                # e.g. 文兰小区 - 塘桥: extract the neighborhood name and location
                position_info = li.css('div.positionInfo a::text').getall()
                detail['house'] = position_info[0]
                detail['location'] = position_info[1]

                # e.g. 650万 -> matches 650
                price_pattern = re.compile(r'\d+')
                total_price = li.css('div.totalPrice span::text').get()
                detail['total_price'] = re.search(price_pattern, total_price).group()

                # e.g. 单价64182元/平米 -> matches 64182
                unit_price = li.css('div.unitPrice span::text').get()
                detail['unit_price'] = re.search(price_pattern, unit_price).group()
                self.data.append(detail)

    def write_csv_file(self):
        head = ["标题", "小区", "房厅", "面积", "朝向", "楼层",
                "年份", "位置", "总价(万)", "单价(元/平方米)"]
        keys = ["title", "house", "bedroom", "area",
                "direction", "floor", "year", "location",
                "total_price", "unit_price"]
        try:
            with open(self.path, 'w', newline='', encoding='utf_8_sig') as csv_file:
                writer = csv.writer(csv_file, dialect='excel')
                if head is not None:
                    writer.writerow(head)
                for item in self.data:
                    row_data = []
                    for k in keys:
                        row_data.append(item[k])
                    writer.writerow(row_data)
            print("Write a CSV file to path %s Successful." % self.path)
        except Exception as e:
            print("Fail to write CSV to path: %s, Case: %s" % (self.path, e))


if __name__ == '__main__':
    start = time.time()
    home_link_spider = HomeLinkSpider()
    home_link_spider.parse_page()
    home_link_spider.write_csv_file()
    end = time.time()
    print("耗时:{}秒".format(end - start))
Now let's look at the result: scraping 580 listings took about 16.5 seconds, saving roughly 2 seconds. So parsel does parse faster than BeautifulSoup; the difference is negligible for small scraping jobs, but may become noticeable on larger ones.
The httpx (sync) + parsel combination
from fake_useragent import UserAgent
import csv
import re
import time
from parsel import Selector
import httpx


class HomeLinkSpider(object):
    def __init__(self):
        self.ua = UserAgent()
        self.headers = {"User-Agent": self.ua.random}
        self.data = list()
        self.path = "浦东_三房_500_800万.csv"
        self.url = "https://sh.lianjia.com/ershoufang/pudong/a3p5/"

    def get_max_page(self):
        # Changed here: requests swapped for httpx
        response = httpx.get(self.url, headers=self.headers)
        if response.status_code == 200:
            # Create a Selector instance
            selector = Selector(response.text)
            # Use a CSS selector to get the max-page div box
            a = selector.css('div[class="page-box house-lst-page-box"]')
            # Use eval to turn the page-data JSON string into a dict
            max_page = eval(a[0].xpath('//@page-data').get())["totalPage"]
            print("最大页码数:{}".format(max_page))
            return max_page
        else:
            print("请求失败 status:{}".format(response.status_code))
            return None

    def parse_page(self):
        max_page = self.get_max_page()
        for i in range(1, max_page + 1):
            url = 'https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
            # Changed here: requests swapped for httpx
            response = httpx.get(url, headers=self.headers)
            selector = Selector(response.text)
            ul = selector.css('ul.sellListContent')[0]
            li_list = ul.css('li')
            for li in li_list:
                detail = dict()
                detail['title'] = li.css('div.title a::text').get()

                # e.g. 2室1厅 | 74.14平米 | 南 | 精装 | 高楼层(共6层) | 1999年建 | 板楼
                house_info = li.css('div.houseInfo::text').get()
                house_info_list = house_info.split(" | ")

                detail['bedroom'] = house_info_list[0]
                detail['area'] = house_info_list[1]
                detail['direction'] = house_info_list[2]

                floor_pattern = re.compile(r'\d{1,2}')
                match1 = re.search(floor_pattern, house_info_list[4])  # match from any position
                if match1:
                    detail['floor'] = match1.group()
                else:
                    detail['floor'] = "未知"

                # Match the build year
                year_pattern = re.compile(r'\d{4}')
                match2 = re.search(year_pattern, house_info_list[5])
                if match2:
                    detail['year'] = match2.group()
                else:
                    detail['year'] = "未知"

                # e.g. 文兰小区 - 塘桥: extract the neighborhood name and location
                position_info = li.css('div.positionInfo a::text').getall()
                detail['house'] = position_info[0]
                detail['location'] = position_info[1]

                # e.g. 650万 -> matches 650
                price_pattern = re.compile(r'\d+')
                total_price = li.css('div.totalPrice span::text').get()
                detail['total_price'] = re.search(price_pattern, total_price).group()

                # e.g. 单价64182元/平米 -> matches 64182
                unit_price = li.css('div.unitPrice span::text').get()
                detail['unit_price'] = re.search(price_pattern, unit_price).group()
                self.data.append(detail)

    def write_csv_file(self):
        head = ["标题", "小区", "房厅", "面积", "朝向", "楼层",
                "年份", "位置", "总价(万)", "单价(元/平方米)"]
        keys = ["title", "house", "bedroom", "area", "direction",
                "floor", "year", "location", "total_price", "unit_price"]
        try:
            with open(self.path, 'w', newline='', encoding='utf_8_sig') as csv_file:
                writer = csv.writer(csv_file, dialect='excel')
                if head is not None:
                    writer.writerow(head)
                for item in self.data:
                    row_data = []
                    for k in keys:
                        row_data.append(item[k])
                    writer.writerow(row_data)
            print("Write a CSV file to path %s Successful." % self.path)
        except Exception as e:
            print("Fail to write CSV to path: %s, Case: %s" % (self.path, e))


if __name__ == '__main__':
    start = time.time()
    home_link_spider = HomeLinkSpider()
    home_link_spider.parse_page()
    home_link_spider.write_csv_file()
    end = time.time()
    print("耗时:{}秒".format(end - start))
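One detail worth noting before moving on: this script calls httpx.get() once per page, which opens a fresh connection each time. httpx also offers a Client that keeps connections alive across requests, which can trim a multi-page crawl further; a minimal sketch (the URL list and header value are placeholders):

import httpx

urls = ['https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
        for i in range(1, 3)]  # just two pages, for illustration

# A single Client reuses the underlying connection (keep-alive) across requests
with httpx.Client(headers={"User-Agent": "Mozilla/5.0"}) as client:
    for url in urls:
        response = client.get(url)
        print(url, response.status_code)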
The httpx (async) + parsel combination
# Async: a coroutine that parses a single page, given the page URL
async def parse_single_page(self, url):
    # Use httpx to send an async request for the page's data
    async with httpx.AsyncClient() as client:
        response = await client.get(url, headers=self.headers)
        selector = Selector(response.text)
        # ... the rest is the same as the sync version

def parse_page(self):
    max_page = self.get_max_page()
    loop = asyncio.get_event_loop()
    # Before Python 3.7, create a single coroutine task with asyncio.ensure_future
    # or loop.create_task; Python 3.7+ also offers asyncio.create_task
    tasks = []
    for i in range(1, max_page + 1):
        url = 'https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
        tasks.append(self.parse_single_page(url))
    # Alternatively, asyncio.gather(*tasks) can put the coroutines on the event loop
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()
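On Python 3.7 and later, the same scheduling can be written more compactly with asyncio.run and asyncio.gather, as the comment above hints. A minimal sketch of what parse_page could look like in that style (method names follow the class above):

def parse_page(self):
    max_page = self.get_max_page()
    urls = ['https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
            for i in range(1, max_page + 1)]

    async def crawl():
        # gather runs all page coroutines concurrently and waits for them all
        await asyncio.gather(*[self.parse_single_page(url) for url in urls])

    # asyncio.run creates and closes the event loop for us (Python 3.7+)
    asyncio.run(crawl())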
The complete script:

from fake_useragent import UserAgent
import csv
import re
import time
from parsel import Selector
import httpx
import asyncio


class HomeLinkSpider(object):
    def __init__(self):
        self.ua = UserAgent()
        self.headers = {"User-Agent": self.ua.random}
        self.data = list()
        self.path = "浦东_三房_500_800万.csv"
        self.url = "https://sh.lianjia.com/ershoufang/pudong/a3p5/"

    def get_max_page(self):
        response = httpx.get(self.url, headers=self.headers)
        if response.status_code == 200:
            # Create a Selector instance
            selector = Selector(response.text)
            # Use a CSS selector to get the max-page div box
            a = selector.css('div[class="page-box house-lst-page-box"]')
            # Use eval to turn the page-data JSON string into a dict
            max_page = eval(a[0].xpath('//@page-data').get())["totalPage"]
            print("最大页码数:{}".format(max_page))
            return max_page
        else:
            print("请求失败 status:{}".format(response.status_code))
            return None

    # Async: a coroutine that parses a single page, given the page URL
    async def parse_single_page(self, url):
        async with httpx.AsyncClient() as client:
            response = await client.get(url, headers=self.headers)
            selector = Selector(response.text)
            ul = selector.css('ul.sellListContent')[0]
            li_list = ul.css('li')
            for li in li_list:
                detail = dict()
                detail['title'] = li.css('div.title a::text').get()

                # e.g. 2室1厅 | 74.14平米 | 南 | 精装 | 高楼层(共6层) | 1999年建 | 板楼
                house_info = li.css('div.houseInfo::text').get()
                house_info_list = house_info.split(" | ")

                detail['bedroom'] = house_info_list[0]
                detail['area'] = house_info_list[1]
                detail['direction'] = house_info_list[2]

                floor_pattern = re.compile(r'\d{1,2}')
                match1 = re.search(floor_pattern, house_info_list[4])  # match from any position
                if match1:
                    detail['floor'] = match1.group()
                else:
                    detail['floor'] = "未知"

                # Match the build year
                year_pattern = re.compile(r'\d{4}')
                match2 = re.search(year_pattern, house_info_list[5])
                if match2:
                    detail['year'] = match2.group()
                else:
                    detail['year'] = "未知"

                # e.g. 文兰小区 - 塘桥: extract the neighborhood name and location
                position_info = li.css('div.positionInfo a::text').getall()
                detail['house'] = position_info[0]
                detail['location'] = position_info[1]

                # e.g. 650万 -> matches 650
                price_pattern = re.compile(r'\d+')
                total_price = li.css('div.totalPrice span::text').get()
                detail['total_price'] = re.search(price_pattern, total_price).group()

                # e.g. 单价64182元/平米 -> matches 64182
                unit_price = li.css('div.unitPrice span::text').get()
                detail['unit_price'] = re.search(price_pattern, unit_price).group()
                self.data.append(detail)

    def parse_page(self):
        max_page = self.get_max_page()
        loop = asyncio.get_event_loop()
        # Before Python 3.7, create a single coroutine task with asyncio.ensure_future
        # or loop.create_task; Python 3.7+ also offers asyncio.create_task
        tasks = []
        for i in range(1, max_page + 1):
            url = 'https://sh.lianjia.com/ershoufang/pudong/pg{}a3p5/'.format(i)
            tasks.append(self.parse_single_page(url))
        # Alternatively, asyncio.gather(*tasks) can put the coroutines on the event loop
        loop.run_until_complete(asyncio.wait(tasks))
        loop.close()

    def write_csv_file(self):
        head = ["标题", "小区", "房厅", "面积", "朝向", "楼层",
                "年份", "位置", "总价(万)", "单价(元/平方米)"]
        keys = ["title", "house", "bedroom", "area", "direction",
                "floor", "year", "location", "total_price", "unit_price"]
        try:
            with open(self.path, 'w', newline='', encoding='utf_8_sig') as csv_file:
                writer = csv.writer(csv_file, dialect='excel')
                if head is not None:
                    writer.writerow(head)
                for item in self.data:
                    row_data = []
                    for k in keys:
                        row_data.append(item[k])
                    writer.writerow(row_data)
            print("Write a CSV file to path %s Successful." % self.path)
        except Exception as e:
            print("Fail to write CSV to path: %s, Case: %s" % (self.path, e))


if __name__ == '__main__':
    start = time.time()
    home_link_spider = HomeLinkSpider()
    home_link_spider.parse_page()
    home_link_spider.write_csv_file()
    end = time.time()
    print("耗时:{}秒".format(end - start))
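One compatibility note on the script above: since Python 3.8, passing bare coroutines to asyncio.wait is deprecated, and Python 3.11 rejects them outright. Wrapping each coroutine in a task first keeps the code working; a sketch of the one-line change inside parse_page:

# Wrap each coroutine in a Task before handing the list to asyncio.wait
tasks.append(asyncio.ensure_future(self.parse_single_page(url)))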
To summarize, the timings for scraping the same 580 listings were:

- requests + BeautifulSoup: 18.5 seconds
- requests + parsel: 16.5 seconds
- httpx (sync) + parsel: 16.1 seconds
- httpx (async) + parsel: 2.5 seconds


Originally published on the WeChat public account 进击的Coder.





