python 爬虫——爬取百度文库VIP内容
生活随笔
收集整理的這篇文章主要介紹了
python 爬虫——爬取百度文库VIP内容
小編覺得挺不錯的，現在分享給大家，幫大家做個參考。
- 轉載自:爬取百度文庫
代碼實現
# -*- coding: utf-8 -*-
"""Download documents from Baidu Wenku.

'doc' and 'txt' type documents are reassembled as plain text and saved to
a .txt file; any other type (ppt, pdf, ...) is downloaded page by page as
JPEG images into a folder named after the document id.
"""
import requests
import re
import json
import os

# One shared session so cookies persist across all requests.
session = requests.session()


def fetch_url(url):
    """Fetch *url* with the shared session and decode the body as GBK text."""
    return session.get(url).content.decode('gbk')


def get_doc_id(url):
    """Extract the document id from a wenku URL of the form .../view/<id>.html."""
    return re.findall('view/(.*).html', url)[0]


def parse_type(content):
    """Return the docType field (e.g. 'doc', 'txt', 'ppt') from the page source."""
    return re.findall(r"docType.*?\:.*?\'(.*?)\'\,", content)[0]


def parse_title(content):
    """Return the document title from the page source."""
    return re.findall(r"title.*?\:.*?\'(.*?)\'\,", content)[0]


def parse_doc(content):
    """Reassemble the text of a 'doc' type document.

    The page source embeds a list of JSON fragment URLs; each fragment
    contains text runs with a 'c' (content) and a 'y' (vertical position)
    field.  A change in 'y' starts a new output line.
    """
    result = ''
    url_list = re.findall('(https.*?0.json.*?)\\\\x22}', content)
    # Un-escape the '\/' sequences inside the embedded URLs.
    url_list = [addr.replace("\\\\\\/", "/") for addr in url_list]
    # NOTE(review): the last 5 URLs are skipped — presumably non-content
    # fragments; confirm against the current page format.
    for url in url_list[:-5]:
        # Use a separate name so the 'content' parameter is not clobbered.
        page = fetch_url(url)
        y = 0
        txtlists = re.findall('"c":"(.*?)".*?"y":(.*?),', page)
        for item in txtlists:
            if y != item[1]:
                # A different vertical position means a new line of text.
                y = item[1]
                n = '\n'
            else:
                n = ''
            result += n
            # Fragment text is stored as unicode escapes (\uXXXX).
            result += item[0].encode('utf-8').decode('unicode_escape', 'ignore')
    return result


def parse_txt(doc_id):
    """Download and reassemble the text of a 'txt' type document."""
    content_url = 'https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=' + doc_id
    content = fetch_url(content_url)
    md5 = re.findall('"md5sum":"(.*?)"', content)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', content)[0]
    rsign = re.findall('"rsign":"(.*?)"', content)[0]
    # BUG FIX: the checksum must be passed as the 'md5sum' query parameter;
    # the original concatenated the md5 value directly after 'type=txt',
    # producing a malformed query string.
    content_url = ('https://wkretype.bdimg.com/retype/text/' + doc_id
                   + '?rn=' + pn + '&type=txt&md5sum=' + md5 + '&rsign=' + rsign)
    content = json.loads(fetch_url(content_url))
    result = ''
    for item in content:
        for i in item['parags']:
            # Literal '\r'/'\n' sequences in the JSON become real newlines.
            result += i['c'].replace('\\r', '\r').replace('\\n', '\n')
    return result


def parse_other(doc_id):
    """Download every page of a ppt/pdf-style document as JPEG images.

    Images are written as <index>.jpg into a directory named *doc_id*,
    which is created if it does not exist.
    """
    content_url = ("https://wenku.baidu.com/browse/getbcsurl?doc_id=" + doc_id
                   + "&pn=1&rn=99999&type=ppt")
    content = fetch_url(content_url)
    url_list = re.findall('{"zoom":"(.*?)","page"', content)
    # Strip the JSON escaping backslashes from the image URLs.
    url_list = [item.replace("\\", '') for item in url_list]
    if not os.path.exists(doc_id):
        os.mkdir(doc_id)
    for index, url in enumerate(url_list):
        image = session.get(url).content
        path = os.path.join(doc_id, str(index) + '.jpg')
        with open(path, 'wb') as f:
            f.write(image)
    print("圖片保存在" + doc_id + "文件夾")


def save_file(filename, content):
    """Write *content* to *filename* as UTF-8 text and report the path."""
    with open(filename, 'w', encoding='utf8') as f:
        f.write(content)
    print('已保存為:' + filename)


def main():
    """Prompt for a wenku URL and dispatch on the document type."""
    url = input('請輸入要下載的文庫URL地址_')
    content = fetch_url(url)
    doc_id = get_doc_id(url)
    # Renamed from 'type' to avoid shadowing the builtin.
    doc_type = parse_type(content)
    title = parse_title(content)
    if doc_type == 'doc':
        result = parse_doc(content)
        save_file(title + '.txt', result)
    elif doc_type == 'txt':
        result = parse_txt(doc_id)
        save_file(title + '.txt', result)
    else:
        parse_other(doc_id)


if __name__ == "__main__":
    main()

總結
以上是生活随笔為你收集整理的python 爬虫——爬取百度文库VIP内容的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Java_JDBC及连接池
- 下一篇: HDMI中继器,HDMI延长器支持更远音