python爬虫:爬取链家深圳全部二手房的详细信息
生活随笔
收集整理的這篇文章主要介紹了
python爬虫:爬取链家深圳全部二手房的详细信息
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
1、問題描述:
爬取鏈家深圳全部二手房的詳細信息,并將爬取的數據存儲到CSV文件中
2、思路分析:
(1)目標(biāo)網(wǎng)址:https://sz.lianjia.com/ershoufang/
(2)代碼結(jié)構(gòu):
class LianjiaSpider(object):
def __init__(self):
def getMaxPage(self, url): # 獲取maxPage
def parsePage(self, url): # 解析每個page,獲取每個house的Link
def parseDetail(self, url): # 根據(jù)Link,獲取每個house的詳細(xì)信息
(3) init(self)初始化函數(shù)
· headers用到了fake_useragent庫,用來隨機生成請求頭。
· datas空列表,用于保存爬取的數(shù)據(jù)。
def __init__(self):
self.headers = {"User-Agent": UserAgent().random}
self.datas = list()
(4) getMaxPage()函數(shù)
主要用來獲取二手房頁面的最大頁數.
def getMaxPage(self, url):
response = requests.get(url, headers = self.headers)
if response.status_code == 200:
source = response.text
soup = BeautifulSoup(source, "html.parser")
pageData = soup.find("div", class_ = "page-box house-lst-page-box")["page-data"]
# pageData = '{"totalPage":100,"curPage":1}',通過eval()函數(shù)把字符串轉(zhuǎn)換為字典
maxPage = eval(pageData)["totalPage"]
return maxPage
else:
print("Fail status: {}".format(response.status_code))
return None
(5)parsePage()函數(shù)
主要是用來進行翻頁的操作,得到每一頁的所有二手房的Links鏈接。它通過利用一個for循環來重構 url實現翻頁操作,而循環最大頁數就是通過上面的 getMaxPage() 來獲取到。
def parsePage(self, url):
maxPage = self.getMaxPage(url)
# 解析每個page,獲取每個二手房的鏈接
for pageNum in range(1, maxPage+1 ):
url = "https://sz.lianjia.com/ershoufang/pg{}/".format(pageNum)
print("當前正在爬取: {}".format(url))
response = requests.get(url, headers = self.headers)
soup = BeautifulSoup(response.text, "html.parser")
links = soup.find_all("div", class_ = "info clear")
for i in links:
link = i.find("a")["href"] #每個<info clear>標(biāo)簽有很多<a>,而我們只需要第一個,所以用find
detail = self.parseDetail(link)
self.datas.append(detail)
(6)parseDetail()函數(shù)
根據parsePage()函數獲取的二手房Link鏈接,向該鏈接發送請求,獲取出詳細頁面信息。
def parseDetail(self, url):
response = requests.get(url, headers = self.headers)
detail = {}
if response.status_code == 200:
soup = BeautifulSoup(response.text, "html.parser")
detail["價格"] = soup.find("span", class_ = "total").text
detail["單價"] = soup.find("span", class_ = "unitPriceValue").text
detail["小區(qū)"] = soup.find("div", class_ = "communityName").find("a", class_ = "info").text
detail["位置"] = soup.find("div", class_="areaName").find("span", class_="info").text
detail["地鐵"] = soup.find("div", class_="areaName").find("a", class_="supplement").text
base = soup.find("div", class_ = "base").find_all("li") # 基本信息
detail["戶型"] = base[0].text[4:]
detail["面積"] = base[2].text[4:]
detail["朝向"] = base[6].text[4:]
detail["電梯"] = base[10].text[4:]
return detail
else:
return None
(7)將數(shù)據(jù)存儲到CSV文件中
這里用到了 pandas 庫的 DataFrame() 方法,它默認的是按照列名的字典順序排序的。想要自定義列的順序,可以加columns字段。
# 將所有爬取的二手房數(shù)據(jù)存儲到csv文件中
data = pd.DataFrame(self.datas)
# columns字段:自定義列的順序(DataFrame默認(rèn)按列名的字典序排序)
columns = ["小區(qū)", "戶型", "面積", "價格", "單價", "朝向", "電梯", "位置", "地鐵"]
data.to_csv(".Lianjia_II.csv", encoding='utf_8_sig', index=False, columns=columns)
3、效果展示
4、完整代碼:
# -*- coding: utf-8 -*-
# author: wangshx6
# date: 2018-11-07
# description: 爬取鏈家深圳全部二手房的詳細信息,并將爬取的數據存儲到CSV文件中
import json

import requests
import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
class LianjiaSpider(object):
    """Scrape every second-hand house listing from Lianjia Shenzhen
    (https://sz.lianjia.com/ershoufang/) and store the details in a CSV file.
    """

    def __init__(self):
        # fake_useragent supplies a random User-Agent so requests look like
        # they come from an ordinary browser.
        self.headers = {"User-Agent": UserAgent().random}
        # One dict of house details per listing is appended here.
        self.datas = list()

    def getMaxPage(self, url):
        """Return the total number of listing pages for `url`.

        Prints the status code and returns None when the HTTP request fails.
        """
        response = requests.get(url, headers=self.headers, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, "html.parser")
            # The pager div carries its state as a JSON attribute, e.g.
            # page-data='{"totalPage":100,"curPage":1}'.
            pageData = soup.find("div", class_="page-box house-lst-page-box")["page-data"]
            # json.loads instead of eval(): never eval() text fetched from
            # the network (arbitrary-code-execution risk).
            return json.loads(pageData)["totalPage"]
        print("Fail status: {}".format(response.status_code))
        return None

    def parsePage(self, url):
        """Walk every listing page, scrape each house's detail page via
        parseDetail(), and finally write all collected rows to a CSV file.
        """
        maxPage = self.getMaxPage(url)
        if maxPage is None:
            # The index page itself could not be fetched; nothing to scrape.
            return
        for pageNum in range(1, maxPage + 1):
            # Rebuild the URL for each page number to paginate.
            pageUrl = "https://sz.lianjia.com/ershoufang/pg{}/".format(pageNum)
            print("當前正在爬取: {}".format(pageUrl))
            response = requests.get(pageUrl, headers=self.headers, timeout=10)
            soup = BeautifulSoup(response.text, "html.parser")
            # Each listing lives in <div class="info clear">; its first <a>
            # links to the detail page, hence find() rather than find_all().
            for info in soup.find_all("div", class_="info clear"):
                link = info.find("a")["href"]
                detail = self.parseDetail(link)
                if detail is not None:  # skip listings whose request failed
                    self.datas.append(detail)
        # Persist everything that was scraped.
        data = pd.DataFrame(self.datas)
        # columns fixes the column order (DataFrame would otherwise sort
        # column names lexicographically).
        columns = ["小區", "戶型", "面積", "價格", "單價", "朝向", "電梯", "位置", "地鐵"]
        data.to_csv("./Lianjia_II.csv", encoding="utf_8_sig", index=False, columns=columns)

    def parseDetail(self, url):
        """Fetch one house's detail page and return its fields as a dict,
        or None when the request fails.
        """
        response = requests.get(url, headers=self.headers, timeout=10)
        if response.status_code != 200:
            return None
        soup = BeautifulSoup(response.text, "html.parser")
        detail = {}
        detail["價格"] = soup.find("span", class_="total").text
        detail["單價"] = soup.find("span", class_="unitPriceValue").text
        detail["小區"] = soup.find("div", class_="communityName").find("a", class_="info").text
        detail["位置"] = soup.find("div", class_="areaName").find("span", class_="info").text
        detail["地鐵"] = soup.find("div", class_="areaName").find("a", class_="supplement").text
        # "Basic info" list: every <li> starts with a 4-character label,
        # so text[4:] keeps only the value part.
        base = soup.find("div", class_="base").find_all("li")
        detail["戶型"] = base[0].text[4:]
        detail["面積"] = base[2].text[4:]
        detail["朝向"] = base[6].text[4:]
        detail["電梯"] = base[10].text[4:]
        return detail
if __name__ == "__main__":
    # Entry point: crawl the Shenzhen second-hand listing index.
    spider = LianjiaSpider()
    spider.parsePage("https://sz.lianjia.com/ershoufang/")
總結(jié)
以上是生活随笔為你收集整理的python爬虫:爬取链家深圳全部二手房的详细信息的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 微信mac版本是什么
- 下一篇: lucky前面加a还是an_lucky的