# -*- coding: utf-8 -*-
# @Time : 2023/10/13 15:08
# @Author : RaSo
# @File : interface_keypoints_for_standingDetect.py
# Begin to show your code!
from ultralytics import YOLO
import math
# 本接口输入和yolov8一样,输出为目标框和站姿检测结果。
# 实现方式,首先,通过yolov8-pose模型,预测出目标和关键点。输出为目标类别和box,以及该目标17个关键点(x,y,score)
# 然后,左右肩,髋,膝,脚踝,肢体关系,确定站姿的目标,输出人的目标框和站姿检测结果output= 图像,{person:standYes/No}
def calculate_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    # math.hypot is the stdlib idiom for sqrt(dx**2 + dy**2) and is more
    # robust against intermediate overflow than the manual formula.
    return math.hypot(x2 - x1, y2 - y1)
# Angle at vertex B relative to points A and C.
def calculate_angle(A, B, C):
    """Return (angle_in_degrees, cosine) of the angle at vertex B of triangle ABC.

    A, B, C are indexable points whose first two items are x and y
    (extra items, e.g. a keypoint confidence score, are ignored).
    """
    AB = math.hypot(B[0] - A[0], B[1] - A[1])
    BC = math.hypot(C[0] - B[0], C[1] - B[1])
    AC = math.hypot(C[0] - A[0], C[1] - A[1])
    if AB == 0 or BC == 0:
        # Degenerate triangle: two points coincide (pose models typically
        # report undetected keypoints at (0, 0)).  The angle is undefined,
        # so return a neutral result instead of raising ZeroDivisionError.
        return 0.0, 1.0
    cos_angle = (AB ** 2 + BC ** 2 - AC ** 2) / (2 * AB * BC)  # law of cosines
    # Clamp against floating-point drift so math.acos never sees |x| > 1.
    cos_angle = max(-1.0, min(1.0, cos_angle))
    angle = math.degrees(math.acos(cos_angle))
    return angle, cos_angle
# Count how many keypoint scores exceed the confidence threshold; used to
# decide whether an angle computation is trustworthy.
def count_numbers_greater_than_0_5(input_list, threshold=0.60):
    """Return how many scores in *input_list* are strictly greater than *threshold*.

    NOTE(review): despite the historical name, the cutoff actually used by this
    file is 0.60, not 0.5 — the default preserves the original behavior and the
    name is kept unchanged so existing callers keep working.
    """
    return sum(1 for score in input_list if score > threshold)
# Detect people and classify each as standing / not standing.
def testStandYesNo_withKeypoint(mode_pt=None, source=None):
    """Run a YOLOv8-pose model on *source* and classify each person as standing.

    Pipeline: the pose model predicts, per person, a box plus 17 COCO
    keypoints (x, y, score).  Shoulder-hip-knee and hip-knee-ankle angles
    (via their cosines) decide the posture.

    Args:
        mode_pt: path to YOLOv8 pose weights (e.g. yolov8l-pose.pt).
        source:  image / directory / video source accepted by YOLO.predict().

    Returns:
        dict mapping each image path to a list of
        {'boxx': <box data>, 'standingYN': 0 or 1, 'Description': str},
        one entry per detected person.
    """
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Model output per person: kpt_shape [17, 3] — 17 keypoints as (x, y, score).
    # Horizontal-flip keypoint order (for reference):
    # [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
    # COCO keypoint names in output order (reference only; fixed 'eft_elbow' typo).
    kpClasses = ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder',
                 'left_elbow', 'right_elbow',
                 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle',
                 'right_ankle']
    model = YOLO(model=mode_pt)  # load the pose model
    # model.to(device)
    result = model.predict(source=source, save=True)
    standYesNoResult = dict()  # image path -> per-person standing verdicts
    # Walk the keypoint-model output, one image at a time.
    for imgres in result:
        print('\n', imgres.path)
        everyImg = []
        for keyP, personBox in zip(imgres.keypoints, imgres.boxes):
            kpList = keyP.data.tolist()
            personBoxList = personBox.data.tolist()
            # Keypoints of interest: shoulders, hips, knees, ankles.
            leftShoulder = kpList[0][5]   # shoulder
            rightShoulder = kpList[0][6]
            leftHip = kpList[0][11]       # hip
            rightHip = kpList[0][12]
            leftKnee = kpList[0][13]      # knee
            rightKnee = kpList[0][14]
            leftAnkle = kpList[0][15]     # ankle
            rightAnkle = kpList[0][16]
            # Hip angles (shoulder-hip-knee) with cosines, plus how many of the
            # three contributing keypoints are confidently detected.
            angle1, cos1 = calculate_angle(leftShoulder, leftHip, leftKnee)
            angle2, cos2 = calculate_angle(rightShoulder, rightHip, rightKnee)
            real1 = count_numbers_greater_than_0_5([leftShoulder[-1], leftHip[-1], leftKnee[-1]])
            real2 = count_numbers_greater_than_0_5([rightShoulder[-1], rightHip[-1], rightKnee[-1]])
            # Knee angles (hip-knee-ankle) with cosines and confidence counts.
            angle3, cos3 = calculate_angle(leftHip, leftKnee, leftAnkle)
            angle4, cos4 = calculate_angle(rightHip, rightKnee, rightAnkle)
            real3 = count_numbers_greater_than_0_5([leftHip[-1], leftKnee[-1], leftAnkle[-1]])
            real4 = count_numbers_greater_than_0_5([rightHip[-1], rightKnee[-1], rightAnkle[-1]])
            print(cos1, cos2, cos3, cos4, real1, real2, real3, real4, 'person=', personBoxList)
            # Posture decision. Cosine thresholds: cos < -0.88 means the joint
            # is nearly straight (~152°+); cos > -0.2 means clearly bent.
            # 1. Both knees confidently detected and both bent -> cannot be standing.
            if real3 == 3 and real4 == 3 and cos3 > -0.2 and cos4 > -0.2:
                everyImg.append(
                    {'boxx': personBoxList, 'standingYN': 0, 'Description': "No standing"})
            elif ((real1 == 3 and cos1 < -0.88) and (real2 == 3 and cos2 < -0.88)) or \
                    (((real1 == 3 and cos1 < -0.88) or (real2 == 3 and cos2 < -0.88)) and (
                            4 <= real3 + real4 <= 5) and cos3 < -0.707 and cos4 < -0.707):
                # All six hip-angle keypoints visible with high confidence ->
                # require straight hips on both sides (or one straight hip plus
                # mostly-visible, near-straight knees).
                print('Standing-person=', personBoxList, ',there is a standing person', 'cos=', min(cos1, cos2))
                everyImg.append(
                    {'boxx': personBoxList, 'standingYN': 1, 'Description': "Is standing"})
            elif (real1 == 3 and real2 <= 2 and cos1 < -0.88) or (
                    real1 <= 2 and real2 == 3 and cos2 < -0.88):
                # Only one of the two hip-angle triplets is fully visible; trust
                # that side alone if its hip is straight.
                print('Standing-person=', personBoxList, ',there is a standing person', 'cos=', min(cos1, cos2))
                everyImg.append(
                    {'boxx': personBoxList, 'standingYN': 1, 'Description': "Is standing"})
            else:
                everyImg.append(
                    {'boxx': personBoxList, 'standingYN': 0, 'Description': "No standing"})
        standYesNoResult[imgres.path] = everyImg
    print("over")
    return standYesNoResult
if __name__ == '__main__':
    # Local demo: run the standing detector over a sample image folder.
    weights_path = r'D:/TanHaiyan/Models/yolov8/pose/yolov8l-pose.pt'
    image_source = r"D:/TanHaiyan/Datasets/keypoints_pose2023/testKeypoint/"
    detections = testStandYesNo_withKeypoint(weights_path, image_source)
    print(detections)
没有合适的资源?快使用搜索试试~ 我知道了~
yolov8-pose姿势估计,站立识别:动作检测-站姿-接口函数
共6个文件
jpg:3个
py:1个
pt:1个
15 下载量 146 浏览量
2023-11-27
22:43:08
上传
评论 2
收藏 80.84MB RAR 举报
温馨提示
基于yolov8-pose的姿势估计模式,实现站姿动作识别python接口和关键点pose模型。详细实现步骤参见博客-yolov8-pose姿势估计,站立识别。https://blog.csdn.net/beauthy/article/details/134651110?spm=1001.2014.3001.5501
资源推荐
资源详情
资源评论
收起资源包目录
StandingYesNoInterface.rar (6个子文件)
StandingYesNoInterface
interface_keypoints_for_standingDetect.py 6KB
testKeypoint
jinchen1.jpeg 941KB
jinchen2.jpg 1.34MB
jinchen3.jpg 50KB
jinchen4.jpg 197KB
pose
yolov8l-pose.pt 85.25MB
共 6 条
- 1
资源评论
柏常青
- 粉丝: 192
- 资源: 6
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功