使用百度API进行关键点识别
在使用百度的AI開放平臺時,不熟悉網(wǎng)頁請求這類知識,遇到使用不暢的問題,借鑒了網(wǎng)上兩個人的經(jīng)驗,最后實現(xiàn)了更直白的代碼。
主程序:
''' # 人體關(guān)鍵點識別 ''' import base64 import urllib import urllib.request,sys,base64 import urllib.parse import json import joint import cv2request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/body_analysis" #使用不同的功能時在百度的相應(yīng)說明文檔處替換此處f = open('/home/zhengr/Documents/data/1.jpg', 'rb') image = base64.b64encode(f.read()) image64 = str(image,'utf-8') image_type = "BASE64"params = {'image': image64,'image_type':"BASE64"}params = urllib.parse.urlencode(params).encode("utf-8")access_token = '[24.fdd8df19e52da8ff449e1484aa582f42.2592000.1556250057.282335-15823849]' request_url = request_url + "?access_token=" + access_token #access token是每個人獲得的,有效期30天?貌似request = urllib.request.urlopen(url=request_url, data=params) # 發(fā)送請求content = request.read() # 將返回結(jié)果讀取出來 print(content) # 顯示返回結(jié)果 result = str(content,'utf-8') res = json.loads(result) print(res['person_info'][0]['body_parts']) ress = res['person_info'][0]['body_parts'] jo = (ress) jo.xunhun('/home/zhengr/Documents/data/1.jpg')?直接用python執(zhí)行該程序就可以獲得關(guān)鍵點識別結(jié)果,access token獲得需要的代碼:
#!/bin/bash
curl -i -k 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=QMLVBU3QbNA25XxawltynC1R&client_secret=GuwC9U5WTIbvWgo7ryolIB6Yy1e5H5Nx'

其中的client_id和client_secret分別是注冊百度平臺時獲得的API Key和Secret Key。執行以上腳本,返回結果中形如 "access_token":"24.fdd8df19e52da8ff449e1484aa582f42.2592000.1556250057.282335-15823849" 的字段即為自己的access token。以上代碼中用到的joint.Joint()是網上的,其代碼如下:
import cv2
import os


class Joint(object):
    """Draw Baidu body-keypoint results (a dict of name -> {'x':..,'y':..})
    onto the source image: one green dot per keypoint plus the skeleton edges.
    """

    # Skeleton edges between keypoint names, in the same drawing order as the
    # original hand-written cv2.line sequence.
    _EDGES = [
        ('nose', 'neck'),
        ('neck', 'left_shoulder'),
        ('neck', 'right_shoulder'),
        ('left_shoulder', 'left_elbow'),
        ('left_elbow', 'left_wrist'),
        ('right_shoulder', 'right_elbow'),
        ('right_elbow', 'right_wrist'),
        ('neck', 'left_hip'),
        ('neck', 'right_hip'),
        ('left_hip', 'left_knee'),
        ('right_hip', 'right_knee'),
        ('left_knee', 'left_ankle'),
        ('right_knee', 'right_ankle'),
    ]

    def __init__(self, dic):
        # dic: body_parts mapping returned by the Baidu API.
        self.dic = dic

    def _point(self, name):
        """Return the (x, y) pixel coordinates of keypoint *name* as ints
        (the API returns floats)."""
        p = self.dic[name]
        return (int(p['x']), int(p['y']))

    def draw_line(self, img):
        """Draw every skeleton edge onto *img* in place (green, thickness 2)."""
        for a, b in self._EDGES:
            cv2.line(img, self._point(a), self._point(b), (0, 255, 0), 2)

    def xunhun(self, img):
        """Load the image file at path *img*, draw all keypoints and the
        skeleton, and display the result until a key is pressed.

        NOTE: kept the original (misspelled) public name for caller
        compatibility.
        """
        im1 = cv2.imread(img, cv2.IMREAD_COLOR)
        # im2 = cv2.resize(im1, (1040, 768), interpolation=cv2.INTER_CUBIC)
        for name in self.dic:
            # Filled green circle of radius 5 at each keypoint.
            cv2.circle(im1, self._point(name), 5, (0, 255, 0), -1)
        self.draw_line(im1)
        cv2.imshow('image', im1)
        cv2.waitKey(0)
使用這個代碼,會有不準(zhǔn)的時候,試了幾張圖片都會出現(xiàn),有部分關(guān)鍵點的坐標(biāo)是(0,0),這種在返回值里有不正確、正確的關(guān)鍵點問題,應(yīng)該不是我能解決的,所以就沒有再追究了。
如有不對的地方,歡迎批評指正,如有侵權(quán),請聯(lián)系我刪除。
?
轉(zhuǎn)載于:https://www.cnblogs.com/xiaoheizi-12345/p/10658936.html
總結(jié)
以上是生活随笔為你收集整理的使用百度API进行关键点识别的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 省掉bean自定义spring mvc注
- 下一篇: excel如何使用标准差函数