An Attempt at Scraping Weibo
WeChat is a fairly closed platform, whereas Weibo's follow/friend information is fairly open and can all be crawled:
1) Find people through their follow lists;
2) Extract each user's Weibo data and store it in a database (a sketch of such a record follows this list):
Weibo nickname and avatar;
follow count, follower count, and number of posts;
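As a rough illustration, the per-user record described above might be stored like this (a minimal sketch assuming MongoDB via pymongo, in the spirit of the commented-out MongoClient setup in the code below; the collection name, field names, and sample values are placeholders of my own):

from pymongo import MongoClient

client = MongoClient('localhost', 27017)
users = client.test.users                 # hypothetical collection for user profiles

user_doc = {
    'nickname': 'example_user',           # Weibo nickname
    'avatar': 'abc123.jpg',               # avatar image file name
    'follow_count': 320,                  # number of accounts this user follows
    'fans_count': 15000,                  # follower (fan) count
    'mblog_count': 4200,                  # number of posts
}
users.insert_one(user_doc)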
Decide, based on a few basic rules, whether to put a user's Weibo into the queue of accounts to crawl.
Metrics: the number of accounts the user follows;
the number of followers; but that list can be huge and full of zombie fans (which is bad: first, it is inefficient;
second, the platform will not let you page through it without limit, there are bound to be restrictions);
the post count and the follower count are important reference points.
How do you decide that a crawled user is not worth following? You can first run a targeted analysis of the field you are crawling to get a rough picture of what those users' Weibo look like (a sketch of these rules follows this list):
1) If the user has published very few posts, you can treat it as a zombie account and skip it, i.e. the post count is below some lower bound;
2) If the user publishes an extremely large number of posts, say 100-plus a day, it is probably a small-time advertiser or a bot;
3) Reposted Weibo are about as useless as zombie posts: you may crawl a large number of posts only to find they are all duplicated content.
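A minimal sketch of these filtering rules (the threshold values are illustrative, the follower-ratio check is my own reading of the follows-versus-fans discussion above, and rule 3 is omitted because it needs the post content itself):

MIN_MBLOGS = 10            # rule 1: below this, treat the account as a zombie
MAX_MBLOGS_PER_DAY = 100   # rule 2: above this, probably an advertiser or a bot
MAX_FOLLOW_FANS_RATIO = 3  # following far more accounts than one has fans looks suspicious

def worth_crawling(mblog_count, mblogs_per_day, follow_count, fans_count):
    if mblog_count < MIN_MBLOGS:
        return False       # too few posts: likely a zombie account
    if mblogs_per_day > MAX_MBLOGS_PER_DAY:
        return False       # floods of posts: likely spam or a bot
    if fans_count > 0 and follow_count / fans_count > MAX_FOLLOW_FANS_RATIO:
        return False       # follows far more people than follow it back
    return True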
# Part of the code is listed below:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr  1 10:18:42 2018
@author: Joe3223
"""
import time
import os
import re
from bs4 import BeautifulSoup
from urllib.request import urlopen
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#import pymongo
#from pymongo import MongoClient
import hashlib
from collections import deque
from lxml import etree
import threading
# Database setup; MongoDB is used here
#client = MongoClient('localhost',27017)
#db = client.test
#followers = db.followers
# Note: if the user-agent is not set here, the redirects may not work
user_agent = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) " +
    "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36"
)
##dcap = dict(DesiredCapabilities.PHANTOMJS)
##dcap["phantomjs.page.settings.userAgent"] = user_agent
dcap = dict(DesiredCapabilities.FIREFOX)
dcap["firefox.page.settings.userAgent"] = user_agent
#browserPath = '/opt/phantomjs-2.1.1-linux-x86_64/bin/phantomjs'
#browserPath = '/usr/bin/phantomjs'
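# Note: the "firefox.page.settings.userAgent" capability above is a PhantomJS-style
# setting that Firefox does not honor. One way to override the user agent for Firefox
# with the older FirefoxProfile API would be the following sketch (an assumption of
# mine, not part of the original code):
#profile = webdriver.FirefoxProfile()
#profile.set_preference("general.useragent.override", user_agent)
#weibo_driver = webdriver.Firefox(firefox_profile=profile)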
# Some basic parameter setup
parser = 'html5lib'
domain = "weibo.com"
url_home = "http://" + domain
download_bf = deque()                # double-ended queue of URL MD5s already seen, so the multi-threaded crawl stays safe
cur_queue = deque()
min_mblogs_allowed = 10              # crawl thresholds (not applied in the code shown here)
max_follow_fans_ratio_allowed = 3
# There are two crawlers: one for Weibo post data and one for user data
weibo_driver = webdriver.Firefox()        # the Weibo post crawler
weibo_driver.set_window_size(1920, 1200)  # optional
# Enqueue a URL; check for duplicates before adding it to the queue
def enqueueUrl(url):
    try:
        md5v = hashlib.md5(url.encode('utf-8')).hexdigest()  # md5() needs bytes in Python 3
        if md5v not in download_bf:  # dedup
            print(url + ' is added to queue')
            cur_queue.append(url)
            download_bf.append(md5v)
        # else:
        #     print('Skip %s' % (url))
    except ValueError:
        pass
# Pop a URL from the left end of the queue
def dequeuUrl():
    return cur_queue.popleft()
# Move on to the next page and crawl it
def go_next_page(cur_driver):
    try:
        next_page = cur_driver.find_element_by_xpath('//a[contains(@class, "page next")]').get_attribute('href')
        print('next page is ' + next_page)
        cur_driver.get(next_page)
        time.sleep(3)
        return True
    except Exception:
        print('next page is not found')
        return False
# Try to fetch elements by XPath, retrying up to 6 times
def get_element_by_xpath(cur_driver, path):
    tried = 0
    while tried < 6:
        html = cur_driver.page_source
        tr = etree.HTML(html)
        elements = tr.xpath(path)
        if len(elements) == 0:
            tried += 1
            time.sleep(1)
            continue
        return elements
    return []  # give up after 6 attempts
# Scroll the page so the lazily loaded data actually shows up
def scroll_to_bottom():
    # scroll at most 50 times
    print('scroll down')
    for i in range(0, 50):
        # print('scrolling for the %d time' % (i))
        weibo_driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        html = weibo_driver.page_source
        tr = etree.HTML(html)
        next_page_url = tr.xpath('//a[contains(@class,"page next")]')
        if len(next_page_url) > 0:
            return next_page_url[0].get('href')
        if len(re.findall('點(diǎn)擊重新載入', html)) > 0:  # the "click to reload" link shown when loading fails
            print('scrolling failed, reload it')
            weibo_driver.find_element_by_link_text('點(diǎn)擊重新載入').click()
        time.sleep(1)
# Extract the Weibo post data
def extract_feed(feeds):
    for i in range(0, 20):
        # scrolling is only needed when crawling post data
        scroll_to_bottom()
        for element in weibo_driver.find_elements_by_class_name('WB_detail'):
            tried = 0
            while tried < 3:
                try:
                    feed = {}
                    feed['time'] = element.find_element_by_xpath('.//div[@class="WB_from S_txt2"]').text
                    feed['content'] = element.find_element_by_class_name('WB_text').text
                    feed['image_names'] = []
                    for image in element.find_elements_by_xpath('.//li[contains(@class,"WB_pic")]/img'):
                        matches = re.findall('/([^/]+)$', image.get_attribute('src'))
                        if matches:
                            feed['image_names'].append(matches[0])  # keep just the image file name
                    feeds.append(feed)
                    print('--------------------')
                    print(feed['time'])
                    print(feed['content'])
                    break
                except Exception:
                    tried += 1
                    time.sleep(1)
        # next page of posts
        if go_next_page(weibo_driver) is False:
            return feeds
def getFollows(pageInfo):
    pattern3 = re.compile('class="S_txt1" title="(.*?)".*?usercard')
    follows = re.findall(pattern3, pageInfo)
    print(follows)
    for i in follows:
        print(i)
        #follower = {"name":i,"type":"follower"}
        #rs = followers.insert_one(follower)
        #print('one insert:{0}'.format(rs.inserted_id))

    ##urlsToScrawl = []
    ##urlsScrawled = []
    patterUrls = re.compile('<a bpfilter="page" class="page S_txt1"[\s\S]*?href="([\s\S]*?pids=Pl_Official_RelationMyfollow__92&cfs=&Pl_Official_RelationMyfollow__92_page=[\s\S]*?)"')
    follows = re.findall(patterUrls, pageInfo)
    for i in follows:
        print("http://weibo.com/"+i)
        ##if i not in urlsScrawled and i not in urlsToScrawl:
        ##urlsToScrapy.append("http://weibo.com/"+i)
def login(current_driver, username, password):
    #driver = webdriver.PhantomJS(executable_path=browserPath)  # path to the browser binary
    #driver = webdriver.PhantomJS(desired_capabilities=dcap)
    #driver = webdriver.Firefox()
    #driver.set_window_size(1920, 1200)

    current_driver.get(url_home)  # open the target page
    #bsObj = BeautifulSoup(user_driver.page_source, parser)  # parse the HTML source of the target page
    time.sleep(10)
    #user_driver.save_screenshot("weiboLogin0.png")
    # log in
    current_driver.find_element_by_id('loginname').send_keys(username)
    #user_driver.find_element_by_id('password').send_keys(password)
    #user_driver.find_element_by_xpath('//div[contains(@class,"input_wrap ")][0]/input').send_keys(password)
    current_driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[2]/div[1]/div[2]/div/div[2]/div[1]/div[2]/div[1]/div/div/div/div[3]/div[2]/div/input').send_keys(password)
    # perform the click()
    current_driver.find_element_by_xpath('//div[contains(@class,"login_btn")][1]/a').click()
    time.sleep(8)
    current_driver.save_screenshot("weiboLogin.png")
    ##verifyCode = input("Please input verify code:")
    ##user_driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[2]/div[1]/div[2]/div/div[2]/div[1]/div[2]/div[1]/div/div/div/div[3]/div[3]/div/input').send_keys(verifyCode)
    ##user_driver.find_element_by_xpath('//div[contains(@class,"login_btn")][1]/a').click()
    ##time.sleep(8)
    ##user_driver.save_screenshot("weiboLogin2.png")

def main(username, password):
    # log in
    #login(user_driver, username, password)
    login(weibo_driver, username, password)

    # wait a while before doing anything else
    time.sleep(30)
    #user_driver.save_screenshot("weibo.png")

    ## Start crawling from a big-V (high-profile) account; this is the real URL entry point
    user_link = "https://weibo.com/u/3738542230?topnav=1&wvr=6&topsug=1&is_hot=1"
    print('downloading ' + user_link)
    weibo_driver.get(user_link)
    time.sleep(5)

    # extract the user's name and avatar
    account_name = get_element_by_xpath(weibo_driver, '//h1')[0].text
    photo = get_element_by_xpath(weibo_driver, '//p[@class="photo_wrap"]/img')[0].get('src')
    account_photo = re.findall('/([^/]+)$', photo)
    # extract the link to the user's follow-list page
    follows_link = get_element_by_xpath(weibo_driver, '//a[@class="t_link S_txt1"]')[0].get('href')
    print('account: ' + account_name)
    print('account_photo: ' + account_photo[0])
    print('follows link is ' + follows_link)
    #user_driver.get("http"+follows_link)
    feeds = []
    #users = []
    # start a thread to fetch the post data
    t_feeds = threading.Thread(target=extract_feed, name=None, args=(feeds,))
    t_feeds.start()
    t_feeds.join()

if __name__ == '__main__':
    main("your_username", "your_password")
    #login(weibo_driver, "your_username", "your_password")
Reposted from: https://www.cnblogs.com/daluozi/p/9466430.html