
How to run MediaPipe FaceMesh with React in an ES6 Node.js environment

I am trying to run this HTML example https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api from https://codepen.io/mediapipe/details/KKgVaPJ in a create-react-app. I have already:

  • npm installed all the FaceMesh MediaPipe packages (roughly the install command sketched below);
  • replaced the jsdelivr tags with Node imports, and I do get the definitions and functions;
  • replaced the video element with react-webcam.
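
For reference, the install step presumably looked something like the following; the package list is inferred from the imports in App.js below, not stated in the original post:

npm install @mediapipe/face_mesh @mediapipe/camera_utils @mediapipe/drawing_utils react-webcam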

I don't know how to replace this jsdelivr loader, which may be what is affecting things:

const faceMesh = new FaceMesh({
  locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
  }
});
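
If you do want to avoid the CDN, one possible approach (a sketch of mine, not part of the original post) is to serve the solution files yourself: copy the contents of node_modules/@mediapipe/face_mesh into the create-react-app public/ folder and point locateFile at that path. The mediapipe/face_mesh target directory below is an arbitrary choice:

// Assumes node_modules/@mediapipe/face_mesh was copied to
// public/mediapipe/face_mesh (e.g. via a postinstall copy script).
const faceMesh = new FaceMesh({
  locateFile: (file) => `${process.env.PUBLIC_URL}/mediapipe/face_mesh/${file}`,
});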

So the questions are:

  • Why is the face mesh not being displayed? Is there an example of what I am trying to do?

Here is my App.js code (sorry for the debugging scaffolding):

import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh, FACEMESH_TESSELATION, FACEMESH_RIGHT_EYE, FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW, FACEMESH_LEFT_EYEBROW, FACEMESH_FACE_OVAL, FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

const videoConstraints = {
  width: 1280,
  height: 720,
  facingMode: "user"
};

function App() {
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  const [cameraReady, setCameraReady] = useState(false);
  let canvasCtx
  let camera

  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];

  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  useEffect(() => {
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}', await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });

    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)

  }, [cameraReady]);

  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });
  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);

          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",marginLeft: "auto",marginRight: "auto",left: 0,right: 0,textAlign: "center",zindex: 9,}}
      />

    </div>
  );
}

export default App;

Solution

You don't need to replace the jsdelivr loader; that piece of code is fine. I do think you need to reorder your code a little, though:

  • You should put the faceMesh initialization inside a useEffect with [] as the dependency array, so that the solution is set up once, when the page first renders.
  • Also, you don't need to fetch the videoElement and canvasElement with document.* calls, since you have already defined refs for them.

Code example:

useEffect(() => {
  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    },
  });

  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });

  faceMesh.onResults(onResults);

  if (
    typeof webcamRef.current !== "undefined" &&
    webcamRef.current !== null
  ) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);
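
One thing the snippet above does not show (my addition, not the original answerer's): since this effect starts the camera, you can return a cleanup function so the pipeline is torn down when the component unmounts. A minimal sketch, assuming your installed versions of @mediapipe/camera_utils and @mediapipe/face_mesh expose stop() and close() respectively:

useEffect(() => {
  // ... same setup as above ...

  return () => {
    // Assumed APIs: Camera.stop() and FaceMesh.close(); check your versions.
    if (camera) camera.stop();
    faceMesh.close();
  };
}, []);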

Finally, in the onResults callback I suggest printing the results first, just to check that the MediaPipe implementation is working at all. And don't forget to set the canvas size before drawing:

function onResults(results) {
  console.log(results)
  canvasCtx = canvasReference.current.getContext('2d')
  canvasReference.current.width = webcamRef.current.video.videoWidth;
  canvasReference.current.height = webcamRef.current.video.videoHeight;

  ...
}
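
Putting the resizing together with the drawing calls from the question's App.js, the full callback could look like the sketch below (this assembly is mine; the connector sets and colors are the question's own):

function onResults(results) {
  console.log(results)
  const canvasElement = canvasReference.current;
  const canvasCtx = canvasElement.getContext('2d');
  // Match the canvas pixel size to the incoming video frame before drawing.
  canvasElement.width = webcamRef.current.video.videoWidth;
  canvasElement.height = webcamRef.current.video.videoHeight;

  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  canvasCtx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);
  if (results.multiFaceLandmarks) {
    for (const landmarks of results.multiFaceLandmarks) {
      drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
      drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
      drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
      drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
      drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
    }
  }
  canvasCtx.restore();
}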

Good luck! :)
