实践 | 图片文本爬虫与数据分析
生活随笔
收集整理的這篇文章主要介紹了「实践 | 图片文本爬虫与数据分析」,小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
python復雜操作實踐:爬蟲與數據分析
- 機器學習-實踐五:明星圖片爬取
- 機器學習-實踐七:科比職業生涯數據爬取與分析
- 機器學習-實踐六:股票行情爬取與分析
機器學習-實踐五:明星圖片爬取
# 直接使用程序爬取網絡數據會被網站識別出來,然后封禁該IP,導致數據爬 # 取中斷,所以我們需要首先將程序訪問頁面偽裝成瀏覽器訪問頁面 # User-Agent:定義一個真實瀏覽器的代理名稱,表明自己的身份(是哪種瀏覽器),本demo為谷歌瀏覽器 # Accept:告訴WEB服務器自己接受什么介質類型,*/* 表示任何類型 # Referer:瀏覽器向WEB服務器表明自己是從哪個網頁URL獲得點擊當前請求中的網址/URL # Connection:表示是否需要持久連接 # Accept-Language:瀏覽器申明自己接收的語言 # Accept-Encoding:瀏覽器申明自己接收的編碼方法,通常指定壓縮方法,是 # 否支持壓縮,支持什么壓縮方法(gzip,deflate) def getPicinfo(url):headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36","Accept": "*/*","Referer": "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=%E4%B8%AD%E5%9B%BD%E8%89%BA%E4%BA%BA&fenlei=256&rsv_pq=cf6f24c500067b9f&rsv_t=c2e724FZlGF9fJYeo9ZV1I0edbhV0Z04aYY%2Fn6U7qaUoH%2B0WbUiKdOr8JO4&rqlang=cn&rsv_dl=ib&rsv_enter=1&rsv_sug3=15&rsv_sug1=6&rsv_sug7=101","Host": "sp0.baidu.com","Connection": "keep-alive","Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6","Accept-Encoding": "gzip, deflate"}# 根據url,使用get()方法獲取頁面內容,返回相應response = requests.get(url, headers)# 成功訪問了頁面if response.status_code == 200:return response.text# 沒有成功訪問頁面,返回Nonereturn None# 圖片存放地址 Download_dir = 'picture' if os.path.exists(Download_dir) == False:os.mkdir(Download_dir)pn_num = 1 # 爬取多少頁 rn_num = 10 # 每頁多少個圖片for k in range(pn_num): # for循環,每次爬取一頁url = "https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?resource_id=28266&from_mid=1&&format=json&ie=utf-8&oe=utf-8&query=%E4%B8%AD%E5%9B%BD%E8%89%BA%E4%BA%BA&sort_key=&sort_type=1&stat0=&stat1=&stat2=&stat3=&pn=" + str(k) + "&rn=" + str(rn_num) + "&_=1613785351574"res = getPicinfo(url) # 調用函數,獲取每一頁內容json_str = json.loads(res) # 將獲取的文本格式轉化為字典格式figs = json_str['data'][0]['result']for i in figs: # for循環讀取每一張圖片的名字name = i['ename']img_url = i['pic_4n_78'] # img_url:圖片地址img_res = requests.get(img_url) # 讀取圖片所在頁面內容if img_res.status_code == 200:ext_str_splits = img_res.headers['Content-Type'].split('/')ext = ext_str_splits[-1] # 索引-1指向列表倒數第一個元素fname = name + "." 
+ ext# 保存圖片open(os.path.join(Download_dir, fname), 'wb').write(img_res.content)print(name, img_url, "saved")機器學習-實踐七:科比職業生涯數據爬取與分析
!pip install bs4 # !wget https://mydueros.cdn.bcebos.com/font/simhei.ttf !cp /home/aistudio/work/simhei.ttf /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/mpl-data/fonts/ttf/ !cp /home/aistudio/work/simhei.ttf .fonts/ !rm -rf .cache/matplotlibimport requests from bs4 import BeautifulSoup import csv import matplotlib.pyplot as plt import pandas as pd# 設置顯示中文 plt.rcParams['font.sans-serif'] = ['simhei'] # 指定默認字體 # plt.rcParams['font.sans-serif']=['Fangsong'] # 用來顯示中文標簽 plt.rcParams['axes.unicode_minus']=False # 用來顯示負號 plt.rcParams['figure.dpi'] = 100 # 每英寸點數# coding=utf-8 ''' Created on 2021年02月20日 @author: zhongshan '''def getKobeList(code):url = "http://www.stat-nba.com/player/stat_box/195_" + code + ".html"response = requests.get(url)resKobe = response.textreturn resKobe# 獲取kobe歷史數據 def getRow(resKobe, code):soup = BeautifulSoup(resKobe, "html.parser")table = soup.find_all(id='stat_box_avg')# 表頭header = []if code == "season":header = ["賽季", "出場", "首發", "時間", "投籃", "命中", "出手", "三分", "命中", "出手", "罰球", "命中", "出手", "籃板", "前場", "后場", "助攻","搶斷", "蓋帽", "失誤", "犯規", "得分", "勝", "負"]if code == "playoff":header = ["賽季", "出場", "時間", "投籃", "命中", "出手", "三分", "命中", "出手", "罰球", "命中", "出手", "籃板", "前場", "后場", "助攻", "搶斷","蓋帽", "失誤", "犯規", "得分", "勝", "負"]if code == "allstar":header = ["賽季", "首發", "時間", "投籃", "命中", "出手", "三分", "命中", "出手", "罰球", "命中", "出手", "籃板", "前場", "后場", "助攻", "搶斷","蓋帽", "失誤", "犯規", "得分"]# 數據rows = [];rows.append(header)for tr in table[0].find_all("tr", class_="sort"):row = []for td in tr.find_all("td"):rank = td.get("rank")if rank != "LAL" and rank != None:row.append(td.get_text())rows.append(row)return rows# 寫入csv文件,rows為數據,dir為寫入文件路徑 def writeCsv(rows, dir):with open(dir, 'w', encoding='utf-8-sig', newline='') as f:writer = csv.writer(f)writer.writerows(rows)# 常規賽數據 resKobe = getKobeList("season") rows = getRow(resKobe, "season") # print(rows) writeCsv(rows, "season.csv") print("season.csv saved")# 季后賽數據 resKobe = 
getKobeList("playoff") rows = getRow(resKobe, "playoff") # print(rows) writeCsv(rows, "playoff.csv") print("playoff.csv saved")# 全明星數據 resKobe = getKobeList("allstar") rows = getRow(resKobe, "allstar") # print(rows) writeCsv(rows, "star.csv") print("star.csv saved") # 籃板、助攻、得分 def show_score(game_name='season', item='籃板', plot_name='line'):# game_name: season, playoff, star# item: 籃板,助攻,得分# plot_name: line,barfile_name = game_name+'.csv'data = pd.read_csv(file_name)X= data['賽季'].values.tolist()X.reverse()if item=='all':Y1 = data['籃板'].values.tolist()Y2 = data['助攻'].values.tolist()Y3 = data['得分'].values.tolist()Y1.reverse()Y2.reverse()Y3.reverse()else:Y = data[item].values.tolist() Y.reverse() if plot_name=='line':if item=='all':plt.plot(X,Y1,c='r',linestyle="-.")plt.plot(X,Y2,c='g',linestyle="--")plt.plot(X,Y3,c='b',linestyle="-")legend=['籃板','助攻','得分']else:plt.plot(X,Y,c='g',linestyle="-")legend=[item]elif plot_name=='bar':#facecolor:表面的顏色;edgecolor:邊框的顏色if item=='all':fig = plt.figure(figsize=(15,5))ax1 = plt.subplot(131)plt.bar(X,Y1,facecolor = '#9999ff',edgecolor = 'white')plt.legend(['籃板'])plt.title('Kobe職業生涯數據分析:'+game_name)plt.xticks(rotation=60)plt.ylabel('籃板')ax2 = plt.subplot(132)plt.bar(X,Y2,facecolor = '#999900',edgecolor = 'white')plt.legend(['助攻'])plt.title('Kobe職業生涯數據分析:'+game_name)plt.xticks(rotation=60)plt.ylabel('助攻')ax3 = plt.subplot(133)plt.bar(X,Y3,facecolor = '#9988ff',edgecolor = 'white')legend=['得分']else:plt.bar(X,Y,facecolor = '#9900ff',edgecolor = 'white')legend=[item] else:returnplt.legend(legend)plt.title('Kobe職業生涯數據分析:'+game_name)plt.xticks(rotation=60)plt.xlabel('賽季')if item!='all':plt.ylabel(item)else:plt.ylabel('得分')plt.savefig('work/Kobe職業生涯數據分析_{}_{}.png'.format(game_name,item))plt.show()# 籃板、助攻、得分 game_name = 'season' for game_name in ['season','playoff','star']:show_score(game_name=game_name, item='籃板', plot_name='bar')show_score(game_name=game_name, item='助攻', plot_name='bar')show_score(game_name=game_name, item='得分', 
plot_name='bar')show_score(game_name=game_name, item='籃板', plot_name='line')show_score(game_name=game_name, item='助攻', plot_name='line')show_score(game_name=game_name, item='得分', plot_name='line')show_score(game_name=game_name, item='all', plot_name='bar')show_score(game_name=game_name, item='all', plot_name='line')機器學習-實踐六:股票行情爬取與分析
# 機器學習-實踐六: scrape stock quotes from eastmoney.com and plot them.
#
# NOTE(review): the commented "!" lines are notebook shell magics for
# installing dependencies and the Chinese font — run them in a notebook cell.
# !pip install fake_useragent
# !pip install bs4
# !cp /home/aistudio/simhei.ttf /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/mpl-data/fonts/ttf/
# !cp /home/aistudio/simhei.ttf .fonts/
# !rm -rf .cache/matplotlib

# --- stock list scraping ---
# coding=utf-8
'''
Created on 2021-02-20
@author: zhongshan
'''
# Data source: http://quote.eastmoney.com/center/gridlist.html

import requests
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import json
import csv


def getHtml(url):
    """GET *url* with a random User-Agent; return the decoded body text."""
    r = requests.get(url, headers={'User-Agent': UserAgent().random})
    r.encoding = r.apparent_encoding  # let requests sniff the real charset
    return r.text


# num: how many records to scrape (mirrors the pz= page size in the URL).
num = 20
# The JSONP endpoint the quote page itself calls for its data.
# NOTE(review): the trailing ":formatted" in the "_" cache-buster parameter
# looks like a copy/paste artifact — confirm against the live endpoint.
stockUrl = 'http://99.push2.eastmoney.com/api/qt/clist/get?cb=jQuery112408733409809437476_1623137764048&pn=1&pz=20&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f3&fs=m:0+t:80&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_=1623137764167:formatted'

if __name__ == '__main__':
    responseText = getHtml(stockUrl)
    # Strip the JSONP wrapper jQuery...(...) to get the bare JSON payload.
    jsonText = responseText.split("(")[1].split(")")[0]
    resJson = json.loads(jsonText)
    datas = resJson["data"]["diff"]
    datalist = []
    for data in datas:
        # f12 = stock code, f14 = stock name.
        row = [data["f12"], data["f14"]]
        datalist.append(row)
    print(datalist)

    # FIX: with-statement guarantees the csv file is closed (the original
    # used open()/close() with no protection against exceptions in between).
    with open('stock.csv', 'w+', encoding='utf-8', newline="") as f:
        writer = csv.writer(f)
        writer.writerow(('代碼', '名稱'))
        for data in datalist:
            # Trailing tab keeps spreadsheet apps from mangling numeric codes.
            writer.writerow((data[0] + "\t", data[1] + "\t"))

import csv
import urllib.request as r
import threading


def getStockList():
    """Read stock.csv back into a list of [code, name] rows (header included)."""
    stockList = []
    # FIX: with-statement instead of manual open/seek/close.
    with open('stock.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        for item in reader:
            stockList.append(item)
    return stockList


def downloadFile(url, filepath):
    """Download *url* to *filepath*; errors are printed, not raised."""
    try:
        r.urlretrieve(url, filepath)
    except Exception as e:
        print(e)
    print(filepath, "is downloaded")


# Semaphore limiting how many download threads run concurrently.
sem = threading.Semaphore(1)


def downloadFileSem(url, filepath):
    """Serialise downloadFile calls through the semaphore."""
    with sem:
        downloadFile(url, filepath)


urlStart = 'http://quotes.money.163.com/service/chddata.html?code='
urlEnd = '&end=20210221&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;VOTURNOVER;VATURNOVER'

if __name__ == '__main__':
    stockList = getStockList()
    stockList.pop(0)  # drop the header row
    print(stockList)
    for s in stockList:
        scode = str(s[0].split("\t")[0])
        # Market prefix: 0 = Shanghai (codes starting with 6), 1 = Shenzhen.
        url = urlStart + ("0" if scode.startswith('6') else "1") + scode + urlEnd
        print(url)
        filepath = (str(s[1].split("\t")[0]) + "_" + scode) + ".csv"
        threading.Thread(target=downloadFileSem, args=(url, filepath)).start()

# --- stock data analysis ---

import pandas as pd
import matplotlib.pyplot as plt
import csv

# Matplotlib setup so Chinese labels render correctly.
plt.rcParams['font.sans-serif'] = ['simhei']  # default font with CJK glyphs
plt.rcParams['axes.unicode_minus'] = False    # render minus signs properly
plt.rcParams['figure.dpi'] = 100              # dots per inch

files = []  # per-stock csv files that actually contain data


# Per-stock csv columns:
# ['日期' '股票代碼' '名稱' '收盤價' '最高價' '最低價' '開盤價' '前收盤' '漲跌額' '漲跌幅' '成交量' '成交金額']
def read_file(file_name):
    """Load one per-stock csv (GBK encoded); return (dataframe, column names)."""
    data = pd.read_csv(file_name, encoding='gbk')
    col_name = data.columns.values
    return data, col_name


def get_files_path():
    """Populate the module-level *files* list with every per-stock csv that
    has more than one data row."""
    stock_list = getStockList()
    paths = []
    for stock in stock_list[1:]:
        p = stock[1].strip() + "_" + stock[0].strip() + ".csv"
        print(p)
        data, _ = read_file(p)
        if len(data) > 1:
            files.append(p)
            print(p)


get_files_path()
print(files)


def get_diff(file_name):
    """Plot daily price change (漲跌額) and percent change (漲跌幅) over time."""
    data, col_name = read_file(file_name)
    index = len(data['日期']) - 1
    # FIX: max(1, ...) — with fewer than 16 rows the original sep was 0 and
    # range(0, len(x), 0) raised "range() arg 3 must not be zero".
    sep = max(1, index // 15)
    plt.figure(figsize=(15, 17))
    x = data['日期'].values.tolist()
    x.reverse()  # csv is newest-first; plot oldest-first
    # Show roughly 15 evenly spaced date ticks.
    xticks = list(range(0, len(x), sep))
    xlabels = [x[i] for i in xticks]
    xticks.append(len(x))
    # 'None' cells become 0 so the series stays numeric.
    y1 = [float(c) if c != 'None' else 0 for c in data['漲跌額'].values.tolist()]
    y2 = [float(c) if c != 'None' else 0 for c in data['漲跌幅'].values.tolist()]
    y1.reverse()
    y2.reverse()
    ax1 = plt.subplot(211)
    plt.plot(range(1, len(x) + 1), y1, c='r')
    plt.title('{}-漲跌額/漲跌幅'.format(file_name.split('_')[0]), fontsize=20)
    ax1.set_xticks(xticks)
    ax1.set_xticklabels(xlabels, rotation=40)
    plt.ylabel('漲跌額', fontsize=20)
    ax2 = plt.subplot(212)
    plt.plot(range(1, len(x) + 1), y2, c='g')
    ax2.set_xticks(xticks)
    ax2.set_xticklabels(xlabels, rotation=40)
    plt.xlabel('日期', fontsize=20)
    plt.ylabel('漲跌幅', fontsize=20)
    plt.savefig('work/' + file_name.split('.')[0] + '_diff.png')
    plt.show()


def get_max_min(file_name):
    """Plot daily high (最高價) vs low (最低價) prices over time."""
    data, col_name = read_file(file_name)
    index = len(data['日期']) - 1
    sep = max(1, index // 15)  # FIX: avoid zero tick step (see get_diff)
    plt.figure(figsize=(15, 10))
    x = data['日期'].values.tolist()
    x.reverse()
    x = x[-index:]
    xticks = list(range(0, len(x), sep))
    xlabels = [x[i] for i in xticks]
    xticks.append(len(x))
    y1 = [float(c) if c != 'None' else 0 for c in data['最高價'].values.tolist()]
    y2 = [float(c) if c != 'None' else 0 for c in data['最低價'].values.tolist()]
    y1.reverse()
    y2.reverse()
    y1 = y1[-index:]
    y2 = y2[-index:]
    ax = plt.subplot(111)
    plt.plot(range(1, len(x) + 1), y1, c='r', linestyle="-")
    plt.plot(range(1, len(x) + 1), y2, c='g', linestyle="--")
    plt.title('{}-最高價/最低價'.format(file_name.split('_')[0]), fontsize=20)
    ax.set_xticks(xticks)
    ax.set_xticklabels(xlabels, rotation=40)
    plt.xlabel('日期', fontsize=20)
    plt.ylabel('價格', fontsize=20)
    plt.legend(['最高價', '最低價'], fontsize=20)
    plt.savefig('work/' + file_name.split('.')[0] + '_minmax.png')
    plt.show()


def get_deal(file_name):
    """Plot daily volume (成交量) and turnover (成交金額) over time."""
    data, col_name = read_file(file_name)
    index = len(data['日期']) - 1
    sep = max(1, index // 15)  # FIX: avoid zero tick step (see get_diff)
    plt.figure(figsize=(15, 10))
    x = data['日期'].values.tolist()
    x.reverse()
    x = x[-index:]
    xticks = list(range(0, len(x), sep))
    xlabels = [x[i] for i in xticks]
    xticks.append(len(x))
    y1 = [float(c) if c != 'None' else 0 for c in data['成交量'].values.tolist()]
    y2 = [float(c) if c != 'None' else 0 for c in data['成交金額'].values.tolist()]
    y1.reverse()
    y2.reverse()
    y1 = y1[-index:]
    y2 = y2[-index:]
    ax = plt.subplot(111)
    plt.plot(range(1, len(x) + 1), y1, c='b', linestyle="-")
    plt.plot(range(1, len(x) + 1), y2, c='r', linestyle="--")
    plt.title('{}-成交量/成交金額'.format(file_name.split('_')[0]), fontsize=20)
    ax.set_xticks(xticks)
    ax.set_xticklabels(xlabels, rotation=40)
    plt.xlabel('日期', fontsize=20)
    plt.legend(['成交量', '成交金額'], fontsize=20)
    plt.savefig('work/' + file_name.split('.')[0] + '_deal.png')
    plt.show()


def get_rel(file_name):
    """Scatter-plot volume (成交量) against the PREVIOUS day's percent change."""
    data, col_name = read_file(file_name)
    index = len(data['日期']) - 1
    sep = max(1, index // 15)  # FIX: avoid zero step (kept for consistency)
    plt.figure(figsize=(15, 10))
    x = data['日期'].values.tolist()
    x.reverse()
    x = x[-index:]
    xticks = list(range(0, len(x), sep))
    xlabels = [x[i] for i in xticks]
    xticks.append(len(x))
    y1 = [float(c) if c != 'None' else 0 for c in data['成交量'].values.tolist()]
    y2 = [float(c) if c != 'None' else 0 for c in data['漲跌幅'].values.tolist()]
    y1.reverse()
    y2.reverse()
    y1 = y1[-index:]
    y2 = y2[-index:]
    # Shift the change series by one day so each volume pairs with the
    # previous day's change (day 0 has no predecessor, hence the leading 0).
    y2 = [0] + y2[:-1]
    ax = plt.subplot(111)
    plt.scatter(y2, y1)
    plt.title('{}-成交量與前一天漲跌幅的關系'.format(file_name.split('_')[0]), fontsize=20)
    plt.xlabel('前一天漲跌幅', fontsize=20)
    plt.ylabel('成交量', fontsize=20)
    plt.savefig('work/' + file_name.split('.')[0] + '_rel.png')
    plt.show()


print(len(files))
for file in files:
    get_max_min(file)
    get_deal(file)
    get_diff(file)
    get_rel(file)

# read_file('潤和軟件_300339.csv')
# read_file('N邁拓_301006.csv')
# read_file('N崧盛_301002.csv')

總結
以上是生活随笔為你收集整理的实践 | 图片文本爬虫与数据分析的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Android 设置定时刷新页面数据,最
- 下一篇: [译]如何在visual studio中