Scraping Images with Python Using Proxies and Multiple Threads
Introduction
Python is a powerful scripting language that is often used to write crawlers. Below is the code for scraping images through proxies with multiple threads.
For the multithreaded proxy-scraping part of the crawler, see: http://www.linuxeye.com/program/1763.html
Notes:
The proxy servers are scraped from http://www.cnproxy.com/ (only the first 8 listing pages are used in this test).
The images come from http://www.ivsky.com (only a limited number of pages are used in this test).
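cnproxy does not print the port as plain text; an inline JavaScript snippet writes it as a sequence of letters (e.g. document.write(":"+i+a+v+w)), and the script below recovers the digits through the portdicts letter-to-digit map. The following is a minimal sketch of that decoding step; the obfuscated string 'i+a+v+w' is only a hypothetical sample.

# Minimal sketch of the port de-obfuscation used in the full script below.
# portdicts is the same letter-to-digit table; 'i+a+v+w' is a made-up sample.
portdicts = {'v': "3", 'm': "4", 'a': "2", 'l': "9", 'q': "0",
             'b': "5", 'i': "7", 'w': "6", 'r': "8", 'c': "1"}
obfuscated = 'i+a+v+w'          # captured from document.write(":"+i+a+v+w)
port = ''.join(portdicts[c] for c in obfuscated.split('+'))
print port                      # -> 7236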
#!/usr/bin/env python
# BLOG: blog.linuxeye.com
# coding: utf-8
import urllib2
import re
import threading
import time
import random

rawProxyList = []
checkedProxyList = []
imgurl_list = []

# Proxy pages to scrape; cnproxy obfuscates ports with the letter-to-digit map below
portdicts = {'v': "3", 'm': "4", 'a': "2", 'l': "9", 'q': "0",
             'b': "5", 'i': "7", 'w': "6", 'r': "8", 'c': "1"}
targets = []
for i in xrange(1, 9):
    target = r"http://www.cnproxy.com/proxy%d.html" % i
    targets.append(target)

# Regex for extracting proxy servers
p = re.compile(r'''<tr><td>(.+?)<SCRIPT type=text/javascript>document.write\(":"\+(.+?)\)</SCRIPT></td><td>(.+?)</td><td>.+?</td><td>(.+?)</td></tr>''')

# Thread class that scrapes proxies
class ProxyGet(threading.Thread):
    def __init__(self, target):
        threading.Thread.__init__(self)
        self.target = target

    def getProxy(self):
        print "Proxy source page: " + self.target
        req = urllib2.urlopen(self.target)
        result = req.read()
        matchs = p.findall(result)
        for row in matchs:
            ip = row[0]
            port = row[1]
            port = map(lambda x: portdicts[x], port.split('+'))  # decode the obfuscated port
            port = ''.join(port)
            agent = row[2]                                       # proxy type column
            addr = row[3].decode("cp936").encode("utf-8")
            proxy = [ip, port, addr]
            rawProxyList.append(proxy)

    def run(self):
        self.getProxy()

# Thread class that validates proxies
class ProxyCheck(threading.Thread):
    def __init__(self, proxyList):
        threading.Thread.__init__(self)
        self.proxyList = proxyList
        self.timeout = 5
        self.testUrl = "http://www.baidu.com/"
        self.testStr = "030173"  # part of Baidu's ICP licence number, expected in the page

    def checkProxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyHandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (proxy[0], proxy[1])})
            opener = urllib2.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
            t1 = time.time()
            try:
                req = opener.open(self.testUrl, timeout=self.timeout)
                result = req.read()
                timeused = time.time() - t1
                pos = result.find(self.testStr)
                if pos > 1:
                    checkedProxyList.append((proxy[0], proxy[1], proxy[2], timeused))
                else:
                    continue
            except Exception:
                continue

    def run(self):
        self.checkProxy()

# Collect image URLs from the target site
def imgurlList(url_home):
    global imgurl_list
    home_page = urllib2.urlopen(url_home)
    url_re = re.compile(r'<li><a href="(.+?)" target="_blank" rel="nofollow">')
    pic_re = re.compile(r'<img src="(.*?\.\w{3,4})"')
    url_list = re.findall(url_re, home_page.read())
    for url in url_list:
        url_page = urllib2.urlopen(url_home + url)
        for img_url in re.findall(pic_re, url_page.read()):
            imgurl_list.append(img_url)

# Thread class that downloads images through a random validated proxy
class getPic(threading.Thread):
    def __init__(self, imgurl_list):
        threading.Thread.__init__(self)
        self.imgurl_list = imgurl_list
        self.timeout = 5

    def downloadimg(self):
        for imgurl in self.imgurl_list:
            pic_suffix = imgurl.split('.')[-1]  # image file extension
            pic_name = str(random.randint(0, 10000000000)) + '.' + pic_suffix
            cookies = urllib2.HTTPCookieProcessor()
            randomCheckedProxy = random.choice(checkedProxyList)  # pick a random validated proxy
            proxyHandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (randomCheckedProxy[0], randomCheckedProxy[1])})
            opener = urllib2.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
            urllib2.install_opener(opener)
            try:
                data_img = opener.open(imgurl, timeout=self.timeout)
                f = open(pic_name, 'wb')
                f.write(data_img.read())
                f.close()
            except:
                continue

    def run(self):
        self.downloadimg()

if __name__ == "__main__":
    getThreads = []
    checkThreads = []
    imgurlList('http://www.ivsky.com')
    getPicThreads = []

    # One thread per target page to scrape proxies
    for i in range(len(targets)):
        t = ProxyGet(targets[i])
        getThreads.append(t)

    for i in range(len(getThreads)):
        getThreads[i].start()

    for i in range(len(getThreads)):
        getThreads[i].join()

    print '.' * 10 + "Scraped %s proxies in total" % len(rawProxyList) + '.' * 10

    # Start 20 validation threads; the scraped proxies are split into 20 slices, one per thread
    for i in range(20):
        t = ProxyCheck(rawProxyList[((len(rawProxyList) + 19) / 20) * i:((len(rawProxyList) + 19) / 20) * (i + 1)])
        checkThreads.append(t)

    for i in range(len(checkThreads)):
        checkThreads[i].start()

    for i in range(len(checkThreads)):
        checkThreads[i].join()

    print '.' * 10 + "%s proxies passed validation" % len(checkedProxyList) + '.' * 10

    # Start 20 download threads, each pulling images through randomly chosen proxies
    for i in range(20):
        t = getPic(imgurl_list[((len(imgurl_list) + 19) / 20) * i:((len(imgurl_list) + 19) / 20) * (i + 1)])
        getPicThreads.append(t)

    for i in range(len(getPicThreads)):
        getPicThreads[i].start()

    for i in range(len(getPicThreads)):
        getPicThreads[i].join()

    print '.' * 10 + "%s images downloaded in total" % len(imgurl_list) + '.' * 10

    # Sort validated proxies by response time and persist them
    f = open("proxy_list.txt", 'w+')
    for proxy in sorted(checkedProxyList, cmp=lambda x, y: cmp(x[3], y[3])):
        f.write("%s:%s\t%s\t%s\n" % (proxy[0], proxy[1], proxy[2], proxy[3]))
    f.close()
Test results:
# ls
proxy_getpic.py
# python proxy_getpic.py
Proxy source page: http://www.cnproxy.com/proxy1.html
Proxy source page: http://www.cnproxy.com/proxy2.html
Proxy source page: http://www.cnproxy.com/proxy3.html
Proxy source page: http://www.cnproxy.com/proxy4.html
Proxy source page: http://www.cnproxy.com/proxy5.html
Proxy source page: http://www.cnproxy.com/proxy6.html
Proxy source page: http://www.cnproxy.com/proxy7.html
Proxy source page: http://www.cnproxy.com/proxy8.html
..........Scraped 800 proxies in total..........
..........458 proxies passed validation..........
..........154 images downloaded in total..........
# cat proxy_list.txt | more
173.213.113.111:3128    United States               0.432188987732
173.213.113.111:8089    United States               0.441318035126
173.213.113.111:7808    United States               0.444597005844
110.4.24.170:80         香港 香港移動通訊有限公司    0.489440202713
211.142.236.135:8080    湖南省株洲市 移動            0.490673780441
211.142.236.135:8081    湖南省株洲市 移動            0.518096923828
211.142.236.135:8000    湖南省株洲市 移動            0.51860499382
211.142.236.135:8082    湖南省株洲市 移動            0.520448207855
# ls
1001117689.jpg 3097883176.jpg 5234319709.jpg 7012274766.jpg 8504924248.jpg
1076458640.jpg 3144369522.jpg 5387877704.jpg 7106183143.jpg 867723868.jpg
1198548712.jpg 3161307031.jpg 5572092752.jpg 7361254661.jpg 8746315373.jpg
165738192.jpg  3228008315.jpg 5575388077.jpg 7389537793.jpg 8848973192.jpg
1704512138.jpg 3306931164.jpg 5610740708.jpg 7407358698.jpg 8973834958.jpg
1742167711.jpg 3320152673.jpg 5717429022.jpg 7561176207.jpg 8976862152.jpg
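Because proxy_list.txt is written sorted by response time (fastest first), later crawls can reuse it directly. Below is a minimal sketch that reads the file produced above and builds an opener from the quickest proxy; the field layout follows the write format in the script, and the test URL is just an example.

# Minimal sketch: reuse the fastest proxy from proxy_list.txt (fastest first).
import urllib2

with open("proxy_list.txt") as f:
    fastest = f.readline().split('\t')[0]    # first field is "ip:port"
opener = urllib2.build_opener(urllib2.ProxyHandler({"http": "http://" + fastest}))
print opener.open("http://www.baidu.com/", timeout=5).getcode()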