【项目实战】vue-springboot-pytorch前后端结合pytorch深度学习 html打开本地摄像头 监控人脸和记录时间
生活随笔
收集整理的這篇文章主要介紹了
【项目实战】vue-springboot-pytorch前后端结合pytorch深度学习 html打开本地摄像头 监控人脸和记录时间
小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
是一個項目的功能之一,調試了兩小時,終于能夠
javascript設置開始計時和暫停計時、監控人臉、記錄時間了
效果圖:
離開頁面之后回到頁面會從0計時(不是關(guān)閉頁面,而是頁面失去焦點)
離開攝像頭時會彈出提示。
離開攝像頭反饋給后端的時間。
全部代碼:
<template>
  <div class="camera_outer">
    <!-- Pre-recorded lecture video -->
    <video src="../assets/shu.mp4" style="width: 600px;height: 600px;margin-left: 150px" controls="controls"></video>
    <hr>
    <el-button type="warning" @click.native="gettime()" style="margin-left: 300px"><i class="el-icon-video-camera-solid"></i> 開始聽課</el-button>
    <!-- NOTE: label mojibake from the original ("結(jié)束聽課") fixed -->
    <el-button type="primary" @click="computetime()"><i class="el-icon-price-tag"></i> 結束聽課</el-button>
    <el-button type="success" @click="out()"><i class="el-icon-loading "></i> 暫時離開</el-button>
    <!-- Running lecture clock (hidden once 結束聽課 is pressed) -->
    <div style="width: 130px;background: #00eeee;margin-left: 350px" v-if="set">
      <i class="el-icon-alarm-clock"></i>您上課的時間:
      <div ref="startTimer"></div>
    </div>
    <!-- Live webcam preview (BUGFIX: the scraped original had the fused tag `<videoid=`) -->
    <video id="videoCamera" :width="videoWidth" :height="videoHeight" autoplay></video>
    <br>
    <!-- Off-screen canvas used to grab one frame every 2 s for the backend -->
    <canvas id="canvasCamera" :width="videoWidth" :height="videoHeight"></canvas>
  </div>
</template>

<script>
export default {
  // Ask for confirmation before route changes; leaving resets the clock.
  beforeRouteLeave(to, from, next) {
    // Requires element-ui; swap $confirm if another UI kit is used.
    this.$confirm('正在離開本頁面,本頁面時間將從零開始計時', '警告', {
      confirmButtonText: '確定',
      cancelButtonText: '取消',
      type: 'warning'
    }).then(() => {
      // Confirmed: proceed with the navigation.
      next();
    }).catch(() => {
      // Cancelled: the address bar already changed, so step back to restore it.
      window.history.go(1);
    });
  },
  data() {
    return {
      activeIndex2: '1',
      timer: '',          // interval id of the 1 s lecture clock
      content: '',
      hour: 0,
      minutes: 0,
      seconds: 0,
      videoWidth: 150,
      videoHeight: 150,
      imgSrc: '',         // last captured frame, bare base64 (no data-URL prefix)
      thisCancas: null,   // (sic) canvas element — name kept for compatibility
      thisContext: null,  // 2d context of the canvas
      thisVideo: null,    // <video id="videoCamera"> element
      total: 0,           // elapsed lecture time in whole minutes
      nowtime: [],        // "H:M:S" timestamps at which the face left the camera
      set: true,          // controls visibility of the clock widget
      yes: true,          // toggled on each page-hide (kept from the original)
      begintime: '',      // wall-clock time when 開始聽課 was pressed
      base64code: '',     // BUGFIX: was assigned in setImage() but never declared
      imageTimer: null    // BUGFIX: interval id of the 2 s snapshot loop (was leaked)
    };
  },
  created() {
    document.addEventListener('visibilitychange', this.startStopVideo);
  },
  mounted() {
    this.getCompetence();
    this.thisCancas = document.getElementById('canvasCamera');
    this.thisContext = this.thisCancas.getContext('2d');
    this.thisVideo = document.getElementById('videoCamera');
    this.startSnapshotLoop();
  },
  methods: {
    // (Re)start the 2 s snapshot loop. BUGFIX: the original called
    // window.setInterval() again on every visibility change without ever
    // clearing the previous interval, stacking snapshot loops.
    startSnapshotLoop() {
      if (this.imageTimer) {
        window.clearInterval(this.imageTimer);
      }
      this.imageTimer = window.setInterval(this.setImage, 2000);
    },
    // Page Visibility handler: reset everything when the tab is hidden,
    // warn and re-initialise the camera when it becomes visible again.
    startStopVideo() {
      if (document.visibilityState === 'hidden') {
        // BUGFIX: the original called clearInterval(this.startTimer) —
        // startTimer is a method, not an interval id — so the clock was
        // never actually stopped. Clear the stored id instead.
        window.clearInterval(this.timer);
        this.yes = !this.yes;
        // Reload so the clock restarts from zero (original behaviour).
        window.location.reload();
      } else if (document.visibilityState === 'visible') {
        this.$message({ message: '您剛剛離開了觀看頁面,將從零開始計時!', type: 'warning' });
        this.getCompetence();
        this.thisCancas = document.getElementById('canvasCamera');
        this.thisContext = this.thisCancas.getContext('2d');
        this.thisVideo = document.getElementById('videoCamera');
        this.startSnapshotLoop();
      }
    },
    // 暫時離開: pause the lecture clock.
    out() {
      window.clearInterval(this.timer);
    },
    // Tick handler (runs once per second): advance H:M:S, recompute the
    // total in minutes, and render "MM:SS total:N" into the ref.
    startTimer() {
      this.seconds += 1;
      if (this.seconds >= 60) {
        this.seconds = 0;
        this.minutes += 1;
      }
      if (this.minutes >= 60) {
        this.minutes = 0;
        this.hour += 1;
      }
      this.total = this.minutes + this.hour * 60;
      const mm = this.minutes < 10 ? '0' + this.minutes : this.minutes;
      const ss = this.seconds < 10 ? '0' + this.seconds : this.seconds;
      this.$refs.startTimer.innerHTML = mm + ':' + ss + ' total:' + this.total;
    },
    // 結束聽課: hide the clock and post the viewing record to the Java backend.
    computetime() {
      this.set = false;
      this.$axios.post('/gettime', {
        timenot: this.nowtime,
        total: this.total + '分',
        begintime: this.begintime
      }).then(resp => {
        if (resp && resp.status === 200) {
          this.$message({ type: 'success', message: '您有一條新的學習記錄生成!' });
        }
      });
    },
    // 開始聽課: record the start time and start the 1 s clock.
    gettime() {
      this.begintime = new Date().toLocaleTimeString();
      // BUGFIX: clear any running clock first so a double-click on
      // 開始聽課 cannot stack two intervals.
      window.clearInterval(this.timer);
      this.timer = setInterval(this.startTimer, 1000);
    },
    // Request camera permission and attach the stream to the <video> element.
    getCompetence() {
      const _this = this;
      this.thisCancas = document.getElementById('canvasCamera');
      this.thisContext = this.thisCancas.getContext('2d');
      this.thisVideo = document.getElementById('videoCamera');
      // Very old browsers may not expose mediaDevices at all.
      if (navigator.mediaDevices === undefined) {
        navigator.mediaDevices = {};
      }
      // Some browsers implement mediaDevices only partially; polyfill
      // getUserMedia on top of the prefixed legacy APIs.
      if (navigator.mediaDevices.getUserMedia === undefined) {
        navigator.mediaDevices.getUserMedia = function (constraints) {
          const getUserMedia =
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.getUserMedia;
          if (!getUserMedia) {
            // BUGFIX: this string literal was broken across two lines by the
            // scraper, which made the script a syntax error.
            return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
          }
          // Wrap the callback-style legacy API in a Promise.
          return new Promise(function (resolve, reject) {
            getUserMedia.call(navigator, constraints, resolve, reject);
          });
        };
      }
      const constraints = {
        audio: false,
        video: {
          width: this.videoWidth,
          height: this.videoHeight,
          transform: 'scaleX(-1)'
        }
      };
      navigator.mediaDevices.getUserMedia(constraints)
        .then(function (stream) {
          // Old browsers may not have srcObject.
          if ('srcObject' in _this.thisVideo) {
            _this.thisVideo.srcObject = stream;
          } else {
            // Deprecated fallback for old browsers.
            _this.thisVideo.src = window.URL.createObjectURL(stream);
          }
          _this.thisVideo.onloadedmetadata = function () {
            _this.thisVideo.play();
          };
        })
        .catch((err) => {
          console.log(err);
        });
    },
    // Grab one frame from the video, POST it (base64) to the Python backend,
    // and record the timestamp whenever the backend reports no face ('b').
    setImage() {
      const date = new Date();
      const _this = this;
      _this.thisContext.drawImage(_this.thisVideo, 0, 0, _this.videoWidth, _this.videoHeight);
      // Strip the data-URL prefix — the backend expects bare base64.
      const image = this.thisCancas.toDataURL('image/png').split('base64,')[1];
      _this.imgSrc = image;
      this.$axios.post('http://localhost:5000/getpic', {
        data: _this.imgSrc
      }).then(resp => {
        if (resp && resp.status === 200) {
          // Rebuild a displayable data URL from the returned code.
          this.base64code = 'data:image/png;base64,' + resp.data.base64code;
          // 'b' is the class name the model returns when no face is seen —
          // presumably "not watching"; TODO confirm against class_names.
          if (resp.data == 'b') {
            this.$message.error(date.getHours() + ':' + date.getMinutes() + ':' + date.getSeconds() + "請您勿離開攝像頭視野!");
            const thistime = date.getHours() + ':' + date.getMinutes() + ':' + date.getSeconds();
            this.nowtime.push(thistime);
          }
          _this.thisContext.clearRect(0, 0, _this.videoWidth, _this.videoHeight);
        }
      });
    }
  },
  beforeDestroy() {
    document.removeEventListener('visibilitychange', this.startStopVideo);
    // BUGFIX: stop both intervals so they do not keep firing after the
    // component is torn down.
    window.clearInterval(this.timer);
    window.clearInterval(this.imageTimer);
  }
};
</script>
其中包含了圖片解碼編碼,打開攝像頭,獲取后端結果。
服務器有兩個,一個是python的pytorch深度學習處理圖片 在flask框架下,一個是java的springboot來獲得離開攝像頭的時間。
java部分:
實體類:
跨域請求:
@RestController public class Timecontroller {@AutowiredTimeDao timeDao;static int id = 0;@CrossOrigin@PostMapping("/gettime")public String getteacherList(@RequestBody String time){id++;System.out.println(time);// System.out.println(nowtime.getId());Map<String, Object> jsonMap = JSON.parseObject(time);System.out.println(jsonMap.get("total"));LocalDate date = LocalDate.now();System.out.println(date);Nowtime nowtime = new Nowtime();nowtime.setNowtime(date.toString());String ns=jsonMap.get("timenot").toString();String totaltime=jsonMap.get("total").toString();String begintime = jsonMap.get("begintime").toString();nowtime.setTimenot(ns);nowtime.setTotal(totaltime);nowtime.setId(id);nowtime.setBegintime(begintime);timeDao.addtime(nowtime);return "ok";//return timenot;}@GetMapping("/gettime")public String getalltime(){System.out.println("time!");List<Nowtime> nowtimes = timeDao.getall();HashMap<String, Object> res = new HashMap<>();res.put("data",nowtimes);String users_json = JSON.toJSONString(res);return users_json;}}xml語句:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.naughty.userlogin02.dao.TimeDao">

    <!-- Insert one viewing record posted by the front end. -->
    <insert id="addtime" parameterType="com.naughty.userlogin02.bean.Nowtime">
        insert into data1.gettime(timenot,total,nowtime,begintime)
        values (#{timenot},#{total},#{nowtime},#{begintime});
    </insert>

    <!-- Fetch all records; filters by nowtime only when one is supplied. -->
    <select id="getall" resultType="com.naughty.userlogin02.bean.Nowtime">
        SELECT * FROM gettime
        <if test="nowtime != null">
            WHERE nowtime like #{nowtime}
        </if>
    </select>

</mapper>
python的主程序:
import base64
from predict import class_names
import torch
from torchvision import datasets, models, transforms
import cv2
import numpy as np
import requests
from flask import Flask, make_response, jsonify
import flask
from flask_cors import CORS
import socket
import threading
import json
import os
from io import BytesIO
from multiprocessing import Process
import io
from PIL import Image

# Global Flask app.
app = Flask(__name__)


def run_index():
    """Start the web server (multi-threaded) and accept all HTTP requests."""
    app.run(host='0.0.0.0', port=5000, threaded=True)


def make_new_response(data):
    """Wrap ``data`` in ``{'code': 0, 'data': ...}`` and allow any CORS origin."""
    res = make_response(jsonify({'code': 0, 'data': data}))
    res.headers['Access-Control-Allow-Origin'] = '*'
    res.headers['Access-Control-Allow-Method'] = '*'
    res.headers['Access-Control-Allow-Headers'] = '*'
    return res


def decode_base64(data):
    """Decode base64, padding being optional.

    :param data: Base64 data as an ASCII byte string
    :returns: The decoded byte string.
    """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += b'=' * (4 - missing_padding)
    return base64.b64decode(data)


@app.route("/test")
def test():
    # Simple liveness probe.
    res = "{'no':'dddd'}"
    return make_new_response(res)


CORS(app, resources=r'/*', supports_credentials=True)
basedir = os.path.abspath(os.path.dirname(__file__))

# Preprocessing pipeline; mean/std are the standard ImageNet statistics,
# presumably matching how the ResNet was trained — confirm against training code.
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# BUGFIX: the original re-loaded the model from disk with torch.load() on
# EVERY request. Load it once, lazily, and reuse it.
_model = None


def _get_model():
    """Return the cached classification model, loading it on first use."""
    global _model
    if _model is None:
        _model = torch.load('modefresnet.pkl')
        _model.eval()
    return _model


@app.route('/getpic', methods=['POST'])
def getpic():
    """Classify one base64-encoded PNG frame.

    Expects a JSON body ``{"data": "<base64 png>"}`` and returns the
    predicted class name as plain text.
    """
    data = json.loads(flask.request.get_data("data"))
    data_64 = str.encode(data['data'])
    imgdata = decode_base64(data_64)
    # BUGFIX: the original wrote the frame to a shared on-disk file '1.jpg'
    # and read it back — racy under concurrent requests and needlessly slow.
    # Decode it in memory instead.
    image = Image.open(io.BytesIO(imgdata)).convert('RGB')
    image = transform(image).unsqueeze(0)  # add the batch dimension
    modelme = _get_model()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        outputs = modelme(image)
    _, predict = torch.max(outputs.data, 1)
    # Batch size is always 1 here, so report the single prediction.
    prediction = class_names[predict[0]]
    print('predicted: {}'.format(prediction))
    return prediction


if __name__ == "__main__":
    app.run(debug=True)
深度學習的處理圖片的網絡模型就不貼了,需要的可以留言
用的是ResNet殘差網絡。識別速度還是很快的,判斷的正確率也比較高。(訓練的數據集很少,只有六百多張)
##Y1BCojf69##4;1%yBNfY3ne6a!/
下例為從指定的層提取ResNet50的特征。
總結(jié)
以上是生活随笔為你收集整理的【项目实战】vue-springboot-pytorch前后端结合pytorch深度学习 html打开本地摄像头 监控人脸和记录时间的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Document.visibilityS
- 下一篇: 【笔记 】栈底层 循环队列的处理 链栈