1.打開攝像頭設備捕獲視頻流,并將視頻流直接賦值給 video 元素的 srcObject 屬性進行顯示(現代瀏覽器已棄用 URL.createObjectURL(stream) 的方式)
示例代碼:
// Open the camera and attach the captured MediaStream to the <video> element.
let video = document.getElementById('video')
navigator.mediaDevices.getUserMedia({
  video: true
}).then(stream => {
  // Modern API: assign the stream directly instead of URL.createObjectURL.
  video.srcObject = stream
}).catch(err => {
  // Surface permission / device errors instead of leaving the rejection unhandled.
  console.error('getUserMedia failed:', err)
})
呈現結果:將視頻渲染至video上:

2.利用setInterval或requestAnimationFrame將每一幀的video重繪在canvas上,canvas便于對后續識別的結果做呈現
// 獲取縮放比
// Compute the canvas scaling ratio: devicePixelRatio / backingStorePixelRatio.
// Checks the standard property first, then legacy vendor-prefixed fallbacks.
// (The original checked context.backingStorePixelRatio twice — duplicate removed.)
const getPixelRatio = (context) => {
  const backingStore = context.backingStorePixelRatio ||
    context.webkitBackingStorePixelRatio ||
    context.mozBackingStorePixelRatio ||
    context.msBackingStorePixelRatio ||
    context.oBackingStorePixelRatio || 1;
  // globalThis === window in browsers; also safe outside a window context.
  return (globalThis.devicePixelRatio || 1) / backingStore;
};
// 重繪至canvas
// Continuously redraw the current video frame onto the canvas (~every 15ms).
// width/height default to the original hard-coded 640x480 capture size.
const canvasCapture = (width = 640, height = 480) => {
  const context = canvasDom.getContext('2d')
  const scale = getPixelRatio(context)
  // Size the backing buffer for the device pixel ratio to avoid a blurry canvas.
  canvasDom.width = width * scale
  canvasDom.height = height * scale
  // Clear any previous capture loop first — the original overwrote interVal,
  // leaking the old timer if canvasCapture was called twice.
  if (interVal) {
    clearInterval(interVal)
  }
  interVal = setInterval(() => {
    context.drawImage(videoDom, 0, 0, width * scale, height * scale)
  }, 15)
}
3.使用基于TensorFlow.js的人臉識別庫face-api.js加載人臉識別模型,根據識別返回的結果,繪制呈現的形式
// 加載AImodel
// Load the face-api.js models, then start a detection loop that draws the
// detection box (optional, gated by withBoxes) and landmarks onto the
// 'overlay' canvas whenever a face is found.
const init = async () => {
  await faceApi.nets.ssdMobilenetv1.loadFromUri("/faceModels");
  await faceApi.loadFaceLandmarkModel("/faceModels");
  options = new faceApi.SsdMobilenetv1Options({
    minConfidence: 0.5, // 0.1 ~ 0.9
  });
  // Clear any previous loop so timers don't stack when init runs twice.
  if (interVal) {
    clearInterval(interVal)
  }
  // The original passed no delay, so the browser fired the callback as fast
  // as possible while each async detection takes far longer — queueing
  // overlapping detections. ~10fps is plenty for live landmark drawing.
  interVal = setInterval(async () => {
    const result = await faceApi.detectSingleFace(videoDom, options).withFaceLandmarks()
    // Guard clause: nothing to draw when no face was detected.
    if (!result) return
    const canvas = document.getElementById('overlay')
    if (useCamera && canvas) {
      // Keep the overlay sized to the video, then rescale the detection
      // coordinates to match before drawing.
      const dims = faceApi.matchDimensions(canvas, videoDom, true)
      const resizedResult = faceApi.resizeResults(result, dims)
      if (withBoxes) {
        faceApi.draw.drawDetections(canvas, resizedResult)
      }
      faceApi.draw.drawFaceLandmarks(canvas, resizedResult)
    }
  }, 100)
}
4.關閉攝像頭,同時清除定時器和清空畫布
// Stop the camera, tear down the capture/detection interval, and briefly
// toggle showCanvas so the canvas element is re-created blank.
const closeCamera = () => {
  // Stop EVERY track — the original stopped only getTracks()[0], which
  // leaves the device active when the stream carries more than one track.
  videoSrc.getTracks().forEach((track) => track.stop())
  showCanvas = false
  useCamera = false
  if (interVal) {
    clearInterval(interVal)
    interVal = null
    // Re-show the canvas on the next tick so the framework remounts a
    // fresh (empty) drawing surface.
    setTimeout(() => {
      showCanvas = true
    }, 10)
  }
}