Scraping the Douban Top 250 Movies and Storing Them in MongoDB
Goal: scrape the movie data from the Douban Top 250 (https://movie.douban.com/top250):
title (title), rating (score), movie details (content), and one-line summary (info).
I. Analyze the page and work out the XPath for each field (a quick way to check the expressions is shown right after this list)
    Title: //div[@class="info"]//span[@class="title"][1]/text()
    Movie details: //div[@class="info"]//div[@class="bd"]/p[1]
    Rating: //div[@class="info"]//div[@class="star"]/span[2]/text()
    One-line summary: //div[@class="info"]//span[@class="inq"]/text()
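These expressions can be sanity-checked in a Scrapy shell before writing the spider. A minimal sketch; passing a browser-like User-Agent here is only a precaution, since Douban tends to reject the default Scrapy one:

    scrapy shell -s USER_AGENT="Mozilla/5.0" "https://movie.douban.com/top250"

    # inside the shell, each expression should return a non-empty list
    >>> response.xpath('//div[@class="info"]//span[@class="title"][1]/text()').extract()[:3]
    >>> response.xpath('//div[@class="info"]//div[@class="star"]/span[2]/text()').extract()[:3]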
II. Create the project and fill in items.py (the resulting layout is sketched below)
    Create the project: scrapy startproject douban
    Enter the project directory: cd douban
    Generate the spider: scrapy genspider movetop250 douban.com
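After these commands the project should have the standard Scrapy layout, roughly as follows (main.py is not generated; it is added by hand in step 1):

    douban/
        scrapy.cfg
        main.py                 # added manually in step 1
        douban/
            __init__.py
            items.py
            pipelines.py
            settings.py
            spiders/
                __init__.py
                movetop250.py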
1. main.py: the script that launches the crawl
from scrapy import cmdline

cmdline.execute("scrapy crawl movetop250".split())
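Running python main.py from the project root is equivalent to typing scrapy crawl movetop250 on the command line; it only exists so the crawl can be started and debugged conveniently from an IDE.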
2. items.py

import scrapy


class DoubanItem(scrapy.Item):
    # movie title
    title = scrapy.Field()
    # rating
    score = scrapy.Field()
    # movie details (director, cast, year, genre, ...)
    content = scrapy.Field()
    # one-line summary
    info = scrapy.Field()
3. movetop250.py

import scrapy

from douban.items import DoubanItem


class Movetop250Spider(scrapy.Spider):
    name = 'movetop250'
    allowed_domains = ['douban.com']
    offset = 0
    url = "https://movie.douban.com/top250?start="
    start_urls = [url + str(offset)]

    def parse(self, response):
        moves = response.xpath('//div[@class="info"]')
        for move in moves:
            item = DoubanItem()
            # movie title
            title = move.xpath('.//span[@class="title"][1]/text()').extract()[0]
            # movie details, e.g. director and cast; the <p> holds several text nodes
            content = move.xpath('.//div[@class="bd"]/p[1]/text()').extract()
            content = "".join(content).strip()
            # rating
            score = move.xpath('.//div[@class="star"]/span[2]/text()').extract()[0]
            # one-line summary (a few movies have none)
            info = move.xpath('.//span[@class="inq"]/text()').extract()
            if len(info) > 0:
                info = info[0]
            else:
                info = ""
            item["title"] = title
            item["content"] = content
            item["score"] = score
            item["info"] = info
            yield item
        # request the next page
        if self.offset < 225:
            self.offset += 25
            url = self.url + str(self.offset)
            yield scrapy.Request(url, callback=self.parse)
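The spider pages through the list by bumping a hard-coded offset up to 225. An alternative that does not assume the list is exactly ten pages long is to follow the site's own "next page" link from inside parse(); a rough sketch, where the span[@class="next"] selector is an assumption about Douban's markup rather than something taken from the original article:

        # at the end of parse(), instead of computing the next offset by hand
        next_href = response.xpath('//span[@class="next"]/a/@href').extract_first()
        if next_href:
            yield response.follow(next_href, callback=self.parse)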
4. pipelines.py

import json

import pymongo
from scrapy.utils.project import get_project_settings

settings = get_project_settings()


class DoubanMongodbPipeline(object):
    def __init__(self):
        print("=====start=====")
        host = settings["MONGO_HOST"]
        port = settings["MONGO_PORT"]
        dbname = settings["MONGO_DBNAME"]
        sheetname = settings["MONGO_SHEETNAME"]
        print("host==", host)
        print("port==", port)
        print("dbname==", dbname)
        print("sheetname==", sheetname)
        # create the client
        self.client = pymongo.MongoClient(host=host, port=port)
        # get or create the database
        mydb = self.client[dbname]
        # get or create the collection
        self.post = mydb[sheetname]

    def process_item(self, item, spider):
        dict_item = dict(item)
        self.post.insert_one(dict_item)
        return item

    def close_spider(self, spider):
        print("======end======")
        self.client.close()


class DoubanPipeline(object):
    def __init__(self):
        print("=====start=====")
        self.file = open("movetop250.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        dict_json = dict(item)
        json_str = json.dumps(dict_json, ensure_ascii=False) + "\n"
        self.file.write(json_str)
        return item

    def close_spider(self, spider):
        print("======close_spider======")
        self.file.close()
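After a crawl, the stored documents can be inspected from a separate script. A minimal sketch, assuming a local mongod on the default port, the database/collection names configured in settings.py below, and pymongo 3.7+ for count_documents():

import pymongo

client = pymongo.MongoClient("127.0.0.1", 27017)
collection = client["douban"]["movetop250"]
# the count should approach 250 once all pages have been crawled
print(collection.count_documents({}))
for doc in collection.find().limit(3):
    print(doc["title"], doc["score"], doc["info"])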
5. settings.py

BOT_NAME = 'douban'

SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'

# usually we do not obey robots.txt when scraping
ROBOTSTXT_OBEY = False

DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                  ' Chrome/67.0.3396.99 Safari/537.36',
}

ITEM_PIPELINES = {
    'douban.pipelines.DoubanPipeline': 301,
    'douban.pipelines.DoubanMongodbPipeline': 300,
}

# logging
LOG_FILE = "douban.log"
LOG_LEVEL = "DEBUG"

# user agent
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 " \
             "Safari/537.36"

# MongoDB connection settings
# mongo host
MONGO_HOST = "127.0.0.1"
# mongo port
MONGO_PORT = 27017
# database that stores the data
MONGO_DBNAME = "douban"
# collection that stores the data
MONGO_SHEETNAME = "movetop250"

# Disable cookies (enabled by default)
COOKIES_ENABLED = False
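With everything in place, start MongoDB and then launch the crawl; roughly as below (the mongod data path depends on your installation):

    # start MongoDB first
    mongod --dbpath /data/db

    # from the project root, either of these starts the crawl
    python main.py
    scrapy crawl movetop250

Both pipelines are enabled, so the same items end up in MongoDB and in movetop250.json.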
If you are passing by, please give me a follow; your support is what keeps me moving forward.
Reprinted from: https://my.oschina.net/u/3892643/blog/1845149