#include "FacialEmotion.h"
// Schraudolph-style fast approximation of exp(x): builds the IEEE-754 bit
// pattern of the result directly from a linear fit of log2(e) * x.
static float fast_exp(float x)
{
    union
    {
        uint32_t i;
        float f;
    } v{};
    v.i = (uint32_t)((1 << 23) * (1.4426950409f * x + 126.93490512f));
    return v.f;
}
static inline float sigmoid(float x)
{
    return 1.0f / (1.0f + fast_exp(-x));
}
static float intersection_area(const Object& a, const Object& b)
{
    cv::Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            std::swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    // #pragma omp parallel sections
    {
        // #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        // #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects)
{
    if (faceobjects.empty())
        return;

    qsort_descent_inplace(faceobjects, 0, (int)faceobjects.size() - 1);
}
static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = (int)faceobjects.size();

    std::vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.width * faceobjects[i].rect.height;
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}
static void generate_grids_and_stride(const int target_w, const int target_h,
                                      std::vector<int>& strides, std::vector<GridAndStride>& grid_strides)
{
    for (int i = 0; i < (int)strides.size(); i++)
    {
        int stride = strides[i];
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GridAndStride gs;
                gs.grid0 = g0;
                gs.grid1 = g1;
                gs.stride = stride;
                grid_strides.push_back(gs);
            }
        }
    }
}
static void generate_proposals(std::vector<GridAndStride> grid_strides,
                               const ncnn::Mat& pred, float prob_threshold, std::vector<Object>& objects)
{
    const int num_points = (int)grid_strides.size();
    const int num_class = 7;  // seven facial expression classes
    const int reg_max_1 = 16; // DFL bins per box side

    for (int i = 0; i < num_points; i++) // out.h
    {
        // each row: 4 * reg_max_1 box-distribution logits followed by num_class scores
        const float* scores = pred.row(i) + 4 * reg_max_1;

        // find label with max score
        int label = -1;
        float score = -FLT_MAX;
        for (int k = 0; k < num_class; k++)
        {
            float confidence = scores[k];
            if (confidence > score)
            {
                label = k;
                score = confidence;
            }
        }

        float box_prob = sigmoid(score);
        if (box_prob >= prob_threshold)
        {
            // softmax over each of the 4 bin distributions (distribution focal loss decoding)
            ncnn::Mat bbox_pred(reg_max_1, 4, (void*)pred.row(i));
            {
                ncnn::Layer* softmax = ncnn::create_layer(ncnn::layer_to_index("Softmax"));

                ncnn::ParamDict pd;
                pd.set(0, 1); // axis
                // pd.set(1, 1);
                softmax->load_param(pd);

                ncnn::Option opt;
                opt.num_threads = 1;
                opt.use_packing_layout = false;

                softmax->create_pipeline(opt);
                softmax->forward_inplace(bbox_pred, opt);
                softmax->destroy_pipeline(opt);

                delete softmax;
            }

            // expected value of each distribution gives the left/top/right/bottom distance
            float pred_ltrb[4];
            for (int k = 0; k < 4; k++)
            {
                float dis = 0.f;
                const float* dis_after_sm = bbox_pred.row(k);
                for (int l = 0; l < reg_max_1; l++)
                {
                    dis += l * dis_after_sm[l];
                }
                pred_ltrb[k] = dis * grid_strides[i].stride;
            }

            // anchor-point center in padded input coordinates
            float pb_cx = (grid_strides[i].grid0 + 0.5f) * grid_strides[i].stride;
            float pb_cy = (grid_strides[i].grid1 + 0.5f) * grid_strides[i].stride;

            float x0 = pb_cx - pred_ltrb[0];
            float y0 = pb_cy - pred_ltrb[1];
            float x1 = pb_cx + pred_ltrb[2];
            float y1 = pb_cy + pred_ltrb[3];

            Object obj;
            obj.rect.x = x0;
            obj.rect.y = y0;
            obj.rect.width = x1 - x0;
            obj.rect.height = y1 - y0;
            obj.label = label;
            obj.prob = box_prob;

            objects.push_back(obj);
        }
    }
}
// Transpose the output blob using ncnn's Permute layer
static void transpose(const ncnn::Mat& in, ncnn::Mat& out)
{
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_fp16_storage = false;
    opt.use_packing_layout = true;

    ncnn::Layer* op = ncnn::create_layer("Permute");

    // set param
    ncnn::ParamDict pd;
    pd.set(0, 1); // order_type=1

    op->load_param(pd);
    op->create_pipeline(opt);
    op->forward(in, out, opt);
    op->destroy_pipeline(opt);

    delete op;
}
FacialEmotion::FacialEmotion()
{
    blob_pool_allocator.set_size_compare_ratio(0.f);
    workspace_pool_allocator.set_size_compare_ratio(0.f);
}
int FacialEmotion::load(std::string param_path, std::string bin_path, int _target_size, bool use_gpu)
{
    yolo.clear();
    blob_pool_allocator.clear();
    workspace_pool_allocator.clear();

    ncnn::set_cpu_powersave(2);
    ncnn::set_omp_num_threads(ncnn::get_big_cpu_count());

    yolo.opt = ncnn::Option();
#if NCNN_VULKAN
    yolo.opt.use_vulkan_compute = use_gpu;
#endif
    yolo.opt.num_threads = 2;
    yolo.opt.blob_allocator = &blob_pool_allocator;
    yolo.opt.workspace_allocator = &workspace_pool_allocator;

    yolo.load_param(param_path.c_str());
    yolo.load_model(bin_path.c_str());

    target_size = _target_size;

    return 0;
}
int FacialEmotion::detect(const cv::Mat& rgb, std::vector<Object>& objects, float prob_threshold, float nms_threshold)
{
    int width = rgb.cols;
    int height = rgb.rows;

    // letterbox: scale the longer side to target_size, keeping the aspect ratio
    int w = width;
    int h = height;
    float scale = 1.f;
    if (w > h)
    {
        scale = (float)target_size / w;
        w = target_size;
        h = h * scale;
    }
    else
    {
        scale = (float)target_size / h;
        h = target_size;
        w = w * scale;
    }

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(rgb.data, ncnn::Mat::PIXEL_BGR, width, height, w, h);

    // pad to a multiple of 32 (the network's maximum stride)
    int wpad = (w + 31) / 32 * 32 - w;
    int hpad = (h + 31) / 32 * 32 - h;
    ncnn::Mat in_pad;
    ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 0.f);

    in_pad.substract_mean_normalize(0, norm_vals);

    ncnn::Extractor ex = yolo.create_extractor();
    ex.input("images", in_pad);

    std::vector<Object> proposals;

    ncnn::Mat out;
    ex.extract("/model.22/Concat_3_output_0", out);

    // the head outputs (4*16 + num_class) x num_points; transpose to one row per point
    ncnn::Mat out1;
    transpose(out, out1);

    std::vector<int> strides = {8, 16, 32}; // might have stride=64
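    // --- NOTE: the online preview of FacialEmotion.cpp is cut off at this point. ---
    // What follows is a reconstruction sketch of the remaining post-processing,
    // based on the helper functions defined above and the standard ncnn YOLOv8
    // pipeline; it is not the original author's code, and details such as the
    // clipping behaviour are assumptions.
    std::vector<GridAndStride> grid_strides;
    generate_grids_and_stride(in_pad.w, in_pad.h, strides, grid_strides);
    generate_proposals(grid_strides, out1, prob_threshold, proposals);

    // sort all proposals by score, descending
    qsort_descent_inplace(proposals);

    // apply non-maximum suppression to the sorted proposals
    std::vector<int> picked;
    nms_sorted_bboxes(proposals, picked, nms_threshold);

    int count = (int)picked.size();
    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // undo the letterbox padding and scaling, back to original image coordinates
        float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
        float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;

        // clip to the image bounds
        x0 = std::max(std::min(x0, (float)(width - 1)), 0.f);
        y0 = std::max(std::min(y0, (float)(height - 1)), 0.f);
        x1 = std::max(std::min(x1, (float)(width - 1)), 0.f);
        y1 = std::max(std::min(y1, (float)(height - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }

    return 0;
}

// Typical usage from a caller (e.g. main.cpp in this package) -- a minimal sketch;
// the file names, input size and thresholds below are assumptions:
//
//   FacialEmotion emotion;
//   emotion.load("yolov8s_emo-opt.param", "yolov8s_emo-opt.bin", 320, /*use_gpu=*/false);
//   cv::Mat img = cv::imread("face.jpg");
//   std::vector<Object> objects;
//   emotion.detect(img, objects, 0.4f, 0.5f);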
Recognition of the seven facial expressions is a multidisciplinary research area that combines psychology, cognitive science, computer vision, and machine learning.

- **Definition of expressions**: an expression is the result of facial muscle activity during an emotional experience and is one of the basic ways humans communicate emotion.
- **Basic-emotion theory**: the psychologist Paul Ekman proposed that humans have seven basic emotions, each with its own characteristic facial expression pattern.

### The seven basic expressions

1. **Happiness**: associated with positive affect; the corners of the mouth turn up and the muscles around the eyes contract.
2. **Sadness**: associated with loss or pain; the eyebrows droop and the corners of the mouth turn down.
3. **Anger**: associated with anger or frustration; the eyebrows press down and the lips tighten.
4. **Surprise**: associated with the unexpected or shocking; the eyes and mouth open wide.
5. **Fear**: associated with fright or anxiety; the eyes widen and the eyebrows rise.
6. **Disgust**: associated with aversion or dislike; the upper lip rises and the corners of the mouth turn down.
7. **Contempt**: associated with disdain; one corner of the mouth rises.
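The detector above reports each face with a numeric `label` in the range [0, 6] (`num_class = 7` in `generate_proposals`). Mapping those indices to the seven expression names listed here depends on the class order used when the model was trained; the sketch below shows one such mapping, with the index order assumed rather than taken from the original project.

```cpp
// Hypothetical label-to-name mapping for the 7 expression classes.
// The index order is an assumption and must match the training label order.
static const char* emotion_names[7] = {
    "happy", "sad", "angry", "surprise", "fear", "disgust", "contempt"
};

// Example: turn a detection into a readable string.
// std::string text = emotion_names[obj.label] + std::string(" ") + std::to_string(obj.prob);
```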