Python Crawler Data Extraction, Part 4: pyquery
1 pyquery
Introduction: like the parsers covered in the earlier parts of this series, pyquery is a powerful web page parsing library. It offers jQuery-like syntax for parsing HTML documents, supports CSS selectors, and is very convenient to use.
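As a quick taste of that jQuery-like style (a minimal sketch, not from the original article), a PyQuery document is called like jQuery's $ function, with a CSS selector:

from pyquery import PyQuery as pq

doc = pq('<div><p class="title">hello</p></div>')   # build a document from an HTML snippet
print(doc('p.title').text())   # prints "hello" -- doc(selector) works like $(selector) in jQuery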
2 Basic usage of pyquery
2.1 Installation
pip install pyquery
2.2 Initialization
String initialization
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
print(doc('li'))
Output: the five <li> nodes, from first item to fifth item.
URL initialization
from pyquery import PyQuery as pq
doc = pq(url='http://www.baidu.com')
print(doc('head'))
Output: the <head> element of the Baidu homepage.
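If the remote page's encoding is not decoded correctly, the printed result comes out garbled. A common workaround, sketched below rather than taken from the original article, is to fetch the page with requests, set the encoding explicitly, and pass the decoded text to pyquery:

import requests
from pyquery import PyQuery as pq

response = requests.get('http://www.baidu.com')
response.encoding = 'utf-8'      # tell requests how to decode the body; adjust to the site's real charset
doc = pq(response.text)          # initialize from the decoded string instead of url=
print(doc('head'))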
File initialization
from pyquery import PyQuery as pq
doc = pq(filename='test.html')
print(doc('li'))
Output: the <li> nodes in test.html; in this example file their text is 1111111111, 2222222222, 3333333333, 4444444444 and 5555555555.
2.3 Basic CSS selectors
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
print(doc('#container .list li'))
Output: the five <li> nodes inside the element with id container and, under it, the element with class list.
2.4 Finding elements
2.4.1 Child elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
print(type(items))
print(items)
lis = items.find('li')
print(type(lis))
print(lis)
Output: items is the <ul class="list"> node, and find('li') returns its five <li> descendants; both are of type <class 'pyquery.pyquery.PyQuery'>.
lis = items.children()
print(type(lis))
print(lis)
Output: the five <li> children. Unlike find(), children() only looks at direct children, although the result is the same here.
lis = items.children('.active')
print(lis)
Output: the third and fourth <li> nodes, the ones that carry the active class.
Note: item-0 and active here are two separate classes; in HTML an element can have several classes, separated by spaces.
2.4.2 Parent elements
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
            <li class="item-0">first item</li>
            <li class="item-1">second item</li>
            <li class="item-0 active"><a href="link3.html">third item</a></li>
            <li class="item-1 active">fourth item</li>
            <li class="item-0">fifth item</li>
        </ul>
    </div>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
container = items.parent()
print(type(container))
print(container)
Output: <class 'pyquery.pyquery.PyQuery'> and the direct parent of the list, the <div id="container"> node together with everything inside it.
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
            <li class="item-0">first item</li>
            <li class="item-1">second item</li>
            <li class="item-0 active"><a href="link3.html">third item</a></li>
            <li class="item-1 active">fourth item</li>
            <li class="item-0">fifth item</li>
        </ul>
    </div>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
parents = items.parents()
print(type(parents))
print(parents)
Output: <class 'pyquery.pyquery.PyQuery'> and every ancestor of the list; both <div class="wrap"> and <div id="container"> are returned, so the list contents appear twice.
parent = items.parents('.wrap')
print(parent)
Output: only the ancestor that matches .wrap, i.e. the outer <div class="wrap"> node.
2.4.3 Sibling elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.list .item-0.active')
print(li.siblings())
Output: second item, first item, fourth item and fifth item. Note that the order of the results does not fully match the order in the document.
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.list .item-0.active')  # the element inside the .list element whose own classes are item-0 and active
print(li.siblings('.active'))
Output: fourth item, the only sibling that also has the active class.
2.5 Traversal
Single element
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
Output: the third <li> node.
Multiple elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
lis = doc('li').items()
print(type(lis))
for li in lis:
    print(li)
Output: <class 'generator'> followed by each of the five <li> nodes, one per loop iteration.
2.6 Getting information
Getting attributes
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.attr('href'))
print(a.attr.href)
Output: the <a href="link3.html"> node, then link3.html twice; attr('href') and attr.href are equivalent.
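One detail worth knowing (a hypothetical two-link snippet, not from the original article): when the selection matches several nodes, attr() only returns the attribute of the first one, so to collect an attribute from every match you iterate with items():

from pyquery import PyQuery as pq

html = '<ul><li><a href="a.html">a</a></li><li><a href="b.html">b</a></li></ul>'
doc = pq(html)
print(doc('a').attr('href'))        # only the first match: a.html
for a in doc('a').items():          # iterate node by node to get every href
    print(a.attr('href'))           # a.html, then b.html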
Getting text
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.text())
Output: the <a href="link3.html"> node, then its text, third item.
Getting HTML
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
print(li.html())
Output: the <li> node itself, then its inner HTML, <a href="link3.html">third item</a>.
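A related point (again a small sketch, not from the original article): on a selection with several nodes, html() returns only the inner HTML of the first match, while text() joins the text of all matches:

from pyquery import PyQuery as pq

doc = pq('<ul><li>one</li><li>two</li></ul>')
lis = doc('li')
print(lis.html())   # inner HTML of the first match only: one
print(lis.text())   # text of all matches joined: one two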
2.7 DOM operations
addClass and removeClass
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.removeClass('active')
print(li)
li.addClass('active')
print(li)
Output: the <li> is printed three times: first with class="item-0 active", then with class="item-0" after removeClass('active'), then with the active class added back.
attr and css
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.attr('name', 'link')
print(li)
li.css('font-size', '14px')
print(li)
Output: the <li> is printed three times: unchanged, then with a name="link" attribute added, then additionally with style="font-size: 14px".
remove
html = '''
<div class="wrap">
    Hello, World
    <p>This is a paragraph.</p>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
wrap = doc('.wrap')
print(wrap.text())
wrap.find('p').remove()
print(wrap.text())
Hello, World
This is a paragraph.
Hello, World
2.8 Pseudo-class selectors
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1">second item</li>
        <li class="item-0 active"><a href="link3.html">third item</a></li>
        <li class="item-1 active">fourth item</li>
        <li class="item-0">fifth item</li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('li:first-child')
print(li)
li = doc('li:last-child')
print(li)
li = doc('li:nth-child(2)')
print(li)
li = doc('li:gt(2)')  # all <li> whose index is greater than 2, counting from 0
print(li)
li = doc('li:nth-child(2n)')
print(li)
li = doc('li:contains(second)')  # select by contained text
print(li)
Output, in order: first item; fifth item; second item; fourth item and fifth item; second item and fourth item; second item.
3 Practical example: scraping the Baidu Tieba 校花吧 forum (pyquery version)
import requests
from lxml import etree
import os
from pyquery import PyQuery as pq


class Baidu(object):
    def __init__(self, name):
        self.url = 'http://tieba.baidu.com/f?ie=utf-8&kw={}'.format(name)
        # Use an old browser's User-Agent; since that browser cannot run JS,
        # the server returns the plain-HTML version of the page
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)'
        }

    # Send a request and return the raw response body
    def get_data(self, url):
        response = requests.get(url, headers=self.headers)
        return response.content

    # Parse a list page: extract the title and link of every post, plus the next-page URL
    def parse_list_page(self, data):
        # (debugging aid) keep a copy of the raw list page on disk
        with open('baidu1.html', 'wb') as f:
            f.write(data)
        # Build the pyquery document
        # html = etree.HTML(data)
        doc = pq(data)
        node_list = doc.find('.j_thread_list .threadlist_title a')
        # xpath equivalent:
        # node_list = html.xpath("//*[@id='thread_list']/li[@class=' j_thread_list clearfix']/div/div[2]/div[1]/div[1]/a")
        # Check the result
        # print(len(node_list))
        data_list = []
        # Iterate over the matched <a> nodes
        for node in node_list.items():
            temp = {}
            temp['url'] = 'http://tieba.baidu.com' + node.attr('href')
            temp['title'] = node.text()
            data_list.append(temp)
        # Extract the next-page link
        next_node = doc.find('#frs_list_pager .next').attr('href')
        # next_node = html.xpath('//*[@id="frs_list_pager"]/a[last()-1]/@href')[0]
        # print(next_node)
        # Build the full next-page URL (the href is protocol-relative);
        # stop paging when there is no next-page link
        next_url = 'http:' + next_node if next_node else None
        # print(next_url)
        return data_list, next_url

    # Parse a detail page: extract the image links
    def parse_detail_page(self, data):
        # html = etree.HTML(data)
        doc = pq(data)
        imagelst = doc.find('.BDE_Image').items()
        image_list = [img.attr('src') for img in imagelst]
        # xpath equivalent:
        # image_list = html.xpath("//cc/div[contains(@class,'d_post')]/img[@class='BDE_Image']/@src")
        print(image_list)
        return image_list

    # Download the images and save them to files
    def download(self, image_list):
        # Create the folder if it does not exist yet
        if not os.path.exists('images1'):
            os.makedirs('images1')
        for image in image_list:
            # os.sep is '/' on macOS/Linux and '\\' on Windows, which keeps the path cross-platform
            file_name = 'images1' + os.sep + image.split('/')[-1]
            image_data = self.get_data(image)
            with open(file_name, 'wb') as f:
                f.write(image_data)

    def run(self):
        # Start from the first list page
        next_url = self.url
        # Loop until there is no next page
        while next_url:
            # Send the request and get the response
            data = self.get_data(next_url)
            # Parse the list page: returns the post data and the next-page URL
            data_list, next_url = self.parse_list_page(data)
            # Parse each detail page to get the image links
            for post in data_list:
                url = post['url']
                result = self.get_data(url)
                image_list = self.parse_detail_page(result)
                # Save the data: download the images
                self.download(image_list)


if __name__ == '__main__':
    # Crawl all images from the 校花吧 forum and save them into a folder
    baidu = Baidu('校花吧')
    baidu.run()
Summary
This article covered pyquery's initialization methods (string, URL and file), CSS selectors, finding child, parent and sibling elements, traversal, extracting attributes, text and HTML, DOM operations, and pseudo-class selectors, and finished with a small Tieba image crawler built on pyquery.