
How to make OpenPose more accurate

I am using the openpose library to find and identify specific points on the human body. Eventually, I want to use it for some human action recognition on simple activities such as walking or running. What I have now does not clearly identify all the points on the body, and I would like to know how to make the pose estimation more accurate. This is the output I get:

import cv2 as cv

import matplotlib.pyplot as plt

net = cv.dnn.readNetFromTensorflow("graph_opt.pb")

inWidth = 368
inHeight = 368
thr = 0.2

BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
               "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
               "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
               "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }

POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
               ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
               ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"],
               ["Neck", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"],
               ["Neck", "Nose"], ["Nose", "REye"], ["REye", "REar"],
               ["Nose", "LEye"], ["LEye", "LEar"] ]

# Load the input image; the file name here is a placeholder for your own image
img = cv.imread("image.jpg")

frameWidth = img.shape[1]
frameHeight = img.shape[0]

def pose_estimation(frame):

    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    
    net.setInput(cv.dnn.blobFromImage(frame,1.0,(inWidth,inHeight),(127.5,127.5,127.5),swapRB=True,crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # MobileNet output is [1, 57, H, W]; we only need the first 19 elements (body-part heatmaps)

    assert(len(BODY_PARTS) == out.shape[1])

    points = []
    for i in range(len(BODY_PARTS)):
        # Slice the heatmap of the corresponding body part.
        heatMap = out[0, i, :, :]

        # Originally we would look for all local maxima. To keep this sample simple
        # we only take the global maximum, which means only a single pose can be
        # detected at a time.
        _,conf,_,point = cv.minMaxLoc(heatMap)
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]
        # Add a point if its confidence is higher than the threshold.
        points.append((int(x),int(y)) if conf > thr else None)

    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert(partFrom in BODY_PARTS)
        assert(partTo in BODY_PARTS)

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]

        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    t,_ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    cv.imshow('OpenPose using OpenCV',frame)
    
    return frame

estimated_image = pose_estimation(img)
plt.imshow(cv.cvtColor(estimated_image, cv.COLOR_BGR2RGB))
plt.show()
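As for making the estimation more accurate: graph_opt.pb is the lightweight MobileNet variant of OpenPose, which trades keypoint quality for speed, so some joints will always come out weak with it. Two simple knobs are the input blob size (368x368 is quite small) and the confidence threshold thr. Below is a minimal sketch of the resolution idea, reusing the net, BODY_PARTS and img defined above; the helper name pose_keypoints and the 656x368 size are my own illustrative choices, not part of the original code:

# Sketch: run the same network with a larger input blob.
# A larger input usually yields sharper heatmaps, at the cost of speed.
# pose_keypoints and the 656x368 size are illustrative assumptions.
def pose_keypoints(frame, in_size=(656, 368), conf_thr=0.1):
    h, w = frame.shape[:2]
    blob = cv.dnn.blobFromImage(frame, 1.0, in_size, (127.5, 127.5, 127.5),
                                swapRB=True, crop=False)
    net.setInput(blob)
    out = net.forward()[:, :19, :, :]  # keep only the 19 body-part heatmaps

    points = []
    for i in range(len(BODY_PARTS)):
        _, conf, _, point = cv.minMaxLoc(out[0, i, :, :])
        x = int(w * point[0] / out.shape[3])
        y = int(h * point[1] / out.shape[2])
        points.append((x, y) if conf > conf_thr else None)
    return points

keypoints = pose_keypoints(img)

If that is still not accurate enough, the heavier Caffe-based OpenPose models (COCO or BODY_25) released by CMU generally produce much cleaner keypoints than this MobileNet graph, and they can be loaded the same way through OpenCV's DNN module (cv.dnn.readNetFromCaffe), at the cost of slower inference.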
