python urllib2及beautifulsoup学习
生活随笔
收集整理的這篇文章主要介紹了
python urllib2及beautifulsoup学习
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
1、python urllib2爬蟲下載網頁的三種方法
#-*-coding:utf-8 -*- import urllib2 import cookieliburl = "http://www.baidu.com" print '第一種方法' response1 = urllib2.urlopen(url) print response1.getcode() print len(response1.read())print "第二種方法" request = urllib2.Request(url) request.add_header("use-agent","Mozilla/5.0") response2 = urllib2.urlopen(request) print response2.getcode() print len(response2.read())print "第三種方法" cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) response3 = urllib2.urlopen(url) print response3.getcode() print cj print response3.read()2、python pip安裝beautifulsoup4命令
windows: 進入 Python 安裝目錄的 Scripts 文件夾,用 dir 命令查看是否已安裝 pip,然後輸入: pip install beautifulsoup4

3、網頁解析器 beautifulsoup語法基礎
#根據HTML網頁字符串創建BeautifulSoup對象 soup = BeautifulSoup(html_doc, #HTML文檔字符串'html.parser', #HTML解析器from_encoding='utf8' #HTML文檔的編碼)#方法:find_all(name,attrs,string) 查找所有標簽為a的節點 soup.find_all('a') #查找所有標簽為a,鏈接符合/view/123.htm形式的節點 soup.find_all('a',href='/view/123.htm') #查找所有標簽為div class為abc 文字為python的節點 soup.find_all('div',class_='abc',string='Python')#得到節點: <a href='1.html'>Python</a> #獲取查找到的節點的標簽名稱 node.name #獲取查找到的a節點的href屬性 node['href'] #獲取查找到的a節點的鏈接文字 node.get_text()4、網頁解析器 beautifulsoup語法練習
#-*- coding:utf-8 -*- from bs4 import BeautifulSoup import re html_doc="""百度新聞頁面源碼,篇幅原因源碼不寫進來了"""soup = BeautifulSoup(html_doc, 'html.parser',from_encoding='utf-8')print '獲取所有的鏈接' links = soup.find_all('a') for link in links:#print link.name,link['href'],link.get_text()print link.name, link.get('href'), link.get_text()print '獲取http://news.baidu.com的鏈接' link_node = soup.find('a',href='http://qijunjie.baijia.baidu.com/article/799732') print link_node.name, link_node.get('href'), link_node.get_text()print '正則匹配' link_node = soup.find('a',href=re.compile(r"qijunjie")) print link_node.name, link_node.get('href'), link_node.get_text()print '獲取a打開新頁面的鏈接' links= soup.find_all('a',target='_blank') for link in links:print link.name, link.get_text()5、網絡爬蟲示例
5.1調度器 spider_main.py#-*- coding:utf-8 -*-
from baike_spider import html_downloader,html_outputer,html_parser,url_managerclass SpiderMain(object):def __init__(self):self.urls = url_manager.UrlManage()self.downloader = html_downloader.HtmlDownloader()self.parser = html_parser.HtmlParser()self.outputer = html_outputer.HtmlOutputer()def craw(self,root_url):self.urls.add_new_url(root_url)count = 1while self.urls.has_new_url():try:new_url = self.urls.get_new_url()print 'craw %d : %s'%(count,new_url)html_cont = self.downloader.download(new_url)new_urls,new_data = self.parser.parse(new_url,html_cont)self.urls.add_new_urls(new_urls)self.outputer.collect_data(new_data)if count == 1000:breakcount = count + 1except:print 'craw failed'self.outputer.output_html()if __name__=="__main__":root_url = "http://baike.baidu.com/item/烏金木?sefr=cr"obj_spider = SpiderMain()obj_spider.craw(root_url) 5.2 url管理器 url_manager.py#-*- coding:utf-8 -*-
class UrlManage(object):
    """Tracks the crawl frontier and the set of urls already handed out."""

    def __init__(self):
        # new_urls: waiting to be crawled; old_urls: already consumed.
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        """Queue one url, ignoring None and anything already known."""
        if url is None:
            return
        already_known = url in self.new_urls or url in self.old_urls
        if not already_known:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue a batch of urls; None or an empty batch is a no-op."""
        if urls is None or len(urls) == 0:
            return
        for candidate in urls:
            self.add_new_url(candidate)

    def has_new_url(self):
        """Report whether at least one url is still waiting."""
        return len(self.new_urls) != 0

    def get_new_url(self):
        """Hand out one pending url and mark it as consumed."""
        picked = self.new_urls.pop()
        self.old_urls.add(picked)
        return picked

# (article section) 5.3 下載器 html_downloader.py
import urllib2


class HtmlDownloader(object):
    """Fetches the raw HTML of a page via urllib2."""

    def download(self, url):
        """Return the response body for url, or None on bad input / non-200."""
        if url is None:
            return None
        resp = urllib2.urlopen(url)
        if resp.getcode() == 200:
            return resp.read()
        return None

# (article section) 5.4 解析器 html_parser.py
import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    """Extracts follow-up urls and page data from downloaded HTML."""

    def parse(self, page_url, html_cont):
        """Return (new_urls, new_data) parsed from html_cont.

        Returns None when page_url or html_cont is missing.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Collect absolute urls for every /view/NNN.htm link on the page."""
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            # Resolve relative links against the current page url.
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Build the {url, title, summary} dict for the page."""
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>烏金木</h1>
        # Fix: soup.find returns None when the node is absent; the original
        # chained .find("h1")/.get_text() then raised AttributeError and the
        # whole page was dropped. Fall back to '' so partial pages still work.
        title_container = soup.find('dd', class_="lemmaWgt-lemmaTitle-title")
        title_node = title_container.find("h1") if title_container is not None else None
        res_data['title'] = title_node.get_text() if title_node is not None else ''
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text() if summary_node is not None else ''
        return res_data

# (article section) 5.5 輸出器 html_outputer.py
#-*- coding:utf-8 -*-
class HtmlOutputer(object):
    """Accumulates parsed page dicts and renders them into out.html."""

    def __init__(self):
        # Each entry is a {url, title, summary} dict from the parser.
        self.datas = []

    def collect_data(self, data):
        """Store one parsed page dict; None is ignored."""
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        """Write every collected row into out.html as a UTF-8 table."""
        # Fix: the original never closed the file handle, so the buffer
        # could stay unflushed and the descriptor leaked. "with" guarantees
        # close even if a write raises.
        with open('out.html', 'w') as fout:
            fout.write("<html>")
            fout.write("<head>")
            fout.write("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />")
            fout.write("</head>")
            fout.write("<body>")
            fout.write("<table>")
            for data in self.datas:
                fout.write("<tr>")
                fout.write("<td>%s</td>" % data['url'])
                # .encode('utf-8') keeps Python 2 from raising
                # UnicodeEncodeError on non-ASCII unicode text.
                fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
                fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
                fout.write("</tr>")
            fout.write("</table>")
            fout.write("</body>")
            fout.write("</html>")

# (article section) 總結
以上是生活随笔為你收集整理的python urllib2及beautifulsoup学习的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Jquery获取select标签的值、文
- 下一篇: 提高mysql千万级大数据SQL查询优化