import pickle
import shutil
import numpy as np
import random
import os
import cv2
from tqdm import tqdm
class BBox:
    """Axis-aligned face bounding box with landmark coordinate transforms.

    Stores both the corner representation (left/top/right/bottom) and the
    origin+size representation (x/y/w/h) of the same box.
    """

    def __init__(self, box):
        # box is (left, top, right, bottom) in absolute pixel coordinates.
        self.left = box[0]
        self.top = box[1]
        self.right = box[2]
        self.bottom = box[3]
        self.x = box[0]
        self.y = box[1]
        self.w = box[2] - box[0]
        self.h = box[3] - box[1]

    def project(self, point):
        """Convert an absolute landmark coordinate to a normalized offset.

        Args:
            point: absolute (x, y) coordinates of one landmark.
        Returns:
            np.ndarray (x, y): offset from the box's top-left corner,
            divided by the box width/height.
        """
        x = (point[0] - self.x) / self.w
        y = (point[1] - self.y) / self.h
        return np.asarray([x, y])

    def reproject(self, point):
        """Inverse of project: map a normalized (x, y) back to absolute coords.

        Args:
            point: normalized (x, y) offset of one landmark.
        Returns:
            np.ndarray (x, y): absolute coordinates.
        """
        x = self.x + self.w * point[0]
        y = self.y + self.h * point[1]
        return np.asarray([x, y])

    def reprojectLandmark(self, landmark):
        """Apply reproject to every landmark; returns an (n, 2) float array."""
        landmark = np.asarray(landmark, dtype=float)
        # Vectorized form of reprojecting each row individually.
        return np.asarray([self.x, self.y]) + landmark * np.asarray([self.w, self.h])

    def projectLandmark(self, landmark):
        """Apply project to every landmark; returns an (n, 2) float array."""
        landmark = np.asarray(landmark, dtype=float)
        return (landmark - np.asarray([self.x, self.y])) / np.asarray([self.w, self.h])
def processed_image(img, scale):
    """Resize an image by `scale` and normalize it for network input.

    Args:
        img: HWC image array (as loaded by cv2.imread) — assumes 3 channels.
        scale: resize factor applied to both height and width.
    Returns:
        float32 CHW array with pixels normalized via (p - 127.5) / 128.
    """
    height, width = img.shape[:2]
    # cv2.resize takes dsize as (width, height), not (height, width).
    new_dim = (int(width * scale), int(height * scale))
    img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR)
    # HWC -> CHW, then map pixel values to roughly [-1, 1).
    image = img_resized.astype(np.float32).transpose((2, 0, 1))
    return (image - 127.5) / 128
def IOU(box, boxes):
    """IoU between one crop box and every ground-truth face box.

    Args:
        box: crop box; first four elements are (x1, y1, x2, y2), a fifth
            element (confidence), if present, is ignored.
        boxes: all face boxes of the image, shape [n, 4].
    Returns:
        IoU values, shape [n,].
    """
    # Areas use the +1 pixel-inclusive convention.
    crop_area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
    face_areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    # Corners of the intersection rectangle.
    ix1 = np.maximum(box[0], boxes[:, 0])
    iy1 = np.maximum(box[1], boxes[:, 1])
    ix2 = np.minimum(box[2], boxes[:, 2])
    iy2 = np.minimum(box[3], boxes[:, 3])
    # Clamp width/height to zero when the rectangles do not overlap.
    inter = np.maximum(0, ix2 - ix1 + 1) * np.maximum(0, iy2 - iy1 + 1)
    # Small epsilon guards against division by zero.
    return inter / (crop_area + face_areas - inter + 1e-10)
def get_landmark_from_lfw_neg(txt, data_path, with_landmark=True):
    """Parse an LFW-style annotation file into paths, face boxes and landmarks.

    Args:
        txt: path of the annotation txt file.
        data_path: root directory that image paths are relative to.
        with_landmark: whether to also parse the 5 facial landmarks.
    Returns:
        list of (image_path, BBox) or (image_path, BBox, landmark) tuples.
    """
    result = []
    with open(txt, 'r') as f:
        for line in f:
            parts = line.strip().split(' ')
            img_path = os.path.join(data_path, parts[0]).replace('\\', '/')
            # LFW stores the box as (x1, x2, y1, y2); reorder to (x1, y1, x2, y2).
            box = [int(float(v)) for v in (parts[1], parts[3], parts[2], parts[4])]
            if not with_landmark:
                result.append((img_path, BBox(box)))
                continue
            # Five (x, y) landmark points stored at fields 5..14.
            landmark = np.zeros((5, 2))
            for i in range(5):
                landmark[i] = (float(parts[5 + 2 * i]), float(parts[6 + 2 * i]))
            result.append((img_path, BBox(box), landmark))
    return result
def get_landmark_from_celeba(data_path, with_landmark=True):
    """Parse the CelebA bbox and landmark annotation files.

    Args:
        data_path: CelebA root directory containing list_bbox_celeba.txt,
            list_landmarks_celeba.txt and the img_celeba/ folder.
        with_landmark: whether to also parse the 5 facial landmarks.
    Returns:
        list of (image_path, BBox) or (image_path, BBox, landmark) tuples;
        an empty list when either annotation file is missing.
    """
    bbox_txt = os.path.join(data_path, 'list_bbox_celeba.txt')
    landmarks_txt = os.path.join(data_path, 'list_landmarks_celeba.txt')
    # Fix: also bail out when the landmarks file is missing — the original
    # only checked the bbox file and crashed on open() otherwise.
    if not os.path.exists(bbox_txt) or not os.path.exists(landmarks_txt):
        return []
    with open(bbox_txt, 'r') as f:
        bbox_lines = f.readlines()
    with open(landmarks_txt, 'r') as f:
        landmarks_lines = f.readlines()
    result = []
    # Both files start with two header lines (record count + column names).
    for i in range(2, len(bbox_lines)):
        bbox_components = bbox_lines[i].strip().split()
        landmarks_components = landmarks_lines[i].strip().split()
        img_path = os.path.join(data_path, 'img_celeba', bbox_components[0]).replace('\\', '/')
        # CelebA stores (x, y, w, h); convert to (x1, y1, x2, y2).
        box = [int(float(v)) for v in bbox_components[1:5]]
        box = [box[0], box[1], box[2] + box[0], box[3] + box[1]]
        if not with_landmark:
            result.append((img_path, BBox(box)))
            continue
        # Five (x, y) landmark points stored at fields 1..10.
        landmark = np.zeros((5, 2))
        for index in range(5):
            landmark[index] = (float(landmarks_components[1 + 2 * index]),
                               float(landmarks_components[2 + 2 * index]))
        result.append((img_path, BBox(box), landmark))
    return result
def combine_data_list(data_dir):
    """Merge the per-category sample lists into one combined training list.

    Args:
        data_dir: directory holding the cropped-sample list files
            (positive.txt / negative.txt / part.txt / landmark.txt).
    """
    npr = np.random
    with open(os.path.join(data_dir, 'positive.txt'), 'r') as f:
        pos = f.readlines()
    with open(os.path.join(data_dir, 'negative.txt'), 'r') as f:
        neg = f.readlines()
    with open(os.path.join(data_dir, 'part.txt'), 'r') as f:
        part = f.readlines()
    with open(os.path.join(data_dir, 'landmark.txt'), 'r') as f:
        landmark = f.readlines()
    with open(os.path.join(data_dir, 'all_data_list.txt'), 'w') as f:
        # Round the positive count down to the nearest thousand as the base unit.
        base_num = len(pos) // 1000 * 1000
        s1 = '整理前的数据:neg数量:{} pos数量:{} part数量:{} landmark: {} 基数:{}'.format(len(neg), len(pos), len(part),
                                                                       len(landmark), base_num)
        print(s1)
        # Shuffle the written data and set the category ratio here: the size
        # arguments fix the dataset mix; the paper uses neg:pos:part:landmark = 3:1:1:2.
        # replace=True kicks in when a category has fewer samples than requested.
        neg_keep = npr.choice(len(neg), size=base_num * 3, replace=base_num * 3 > len(neg))
        part_keep = npr.choice(len(part), size=base_num, replace=base_num > len(part))
        pos_keep = npr.choice(len(pos), size=base_num, replace=base_num > len(pos))
        landmark_keep = npr.choice(len(landmark), size=base_num * 2, replace=base_num * 2 > len(landmark))
        s2 = '整理后的数据:neg数量:{} pos数量:{} part数量:{} landmark数量:{}'.format(len(neg_keep), len(pos_keep),
                                                                       len(part_keep), len(landmark_keep))
        print(s2)
        with open(os.path.join(data_dir, 'temp.txt'), 'a', encoding='utf-8') as f_temp:
            f_temp.write('%s\n' % s1)
            # NOTE(review): this copy of the source is truncated mid-statement
            # below — the remainder of combine_data_list (presumably writing s2
            # and the selected samples into all_data_list.txt via `f`) is missing
            # and must be restored from the original file.
            f_temp.
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
98%高识别率,解压就能用,有疑问请留言,提供答疑,保证使用!!!!! 使用简要解析https://blog.csdn.net/weixin_46611502/article/details/124318670
资源推荐
资源详情
资源评论
收起资源包目录
基于MTCNN+arcface的人脸检测识别(pytorch) (337个子文件)
.gitignore 50B
face.iml 555B
b0947d7c4ebe6095387501a84bd3d21d.jpeg 874KB
506fd85077bfe9e3bfc264080fac7fc4.jpeg 819KB
2a906b0d5f7ebfe8dfef3cb594181850.jpeg 584KB
dd0a3f931ed955c1aa1c761c58d8d456.jpeg 529KB
2f623f00283480c4c44f1038fad4a039.jpeg 480KB
886b319415621fb28a974a98bd29f1ae.jpeg 454KB
88f4d9c9879958b61c662b148f72c3b5.jpeg 445KB
e7ce5e65f7a17bdd39ed019481624de0.jpeg 408KB
331c2cc3f623439614e52a4cf326195d.jpeg 350KB
068e6afa5ec32f019971a8598cfc7ec9.jpeg 342KB
234cf9969cf3560356b3b644c46ef0dc.jpeg 330KB
ad850aeb2f9e59559ceb965b7157f45c.jpeg 318KB
5f179d363cee7c11deac166f46ba5f19.jpeg 300KB
581d31b940aea328e133e93f5a840e94.jpeg 281KB
08c7b06ddb138b9e81dea59fe46d38d6.jpeg 269KB
989f616d042a70583bfcccf31f9b78ae.jpeg 249KB
cfdd6c7f0c3a94a680bed6ae0ab3f140.jpeg 230KB
704c07ab45b08311e20c81d43739fc48.jpeg 158KB
8db7c3bb14555aa9dc3294ac04f6be06.jpeg 156KB
d10676722595ca7683e0582f5a0cc41c.jpeg 140KB
81cf2259fd18e6a49e20f87258d80570.jpeg 132KB
08bea73eed7ccf95ea747fa78c182edc.jpeg 131KB
e0174b95edb07c328a1d0a97098fa7f5.jpeg 126KB
57b9460f6e8f3f0619dc5ec1b1423445.jpeg 125KB
c5886afc0ab303fd0504c1712c6aef80.jpeg 120KB
144738a90c1b9db3b8b22f7acec6f89f.jpeg 113KB
e3225d6c2f254eb5afaeeb6c7a49d4c0.jpeg 109KB
51bc3144f35353a1260680b28dcfa12f.jpeg 108KB
de46a8640462cca92a8fc4d7e5c6e89f.jpeg 106KB
e0ce4c454365b439f1d8e400cdbe442f.jpeg 103KB
ce75d5ff5a02d0ad85d864e52bbb9777.jpeg 99KB
e734ffcf8425abbbdcc7649b2d78f516.jpeg 98KB
1.jpeg 98KB
2816bc48afedf5e68f10c1d73e358091.jpeg 96KB
533172b18cc4aa457ac95df1c487a91e.jpeg 93KB
681a063b6095a9e95eb0c864777f80eb.jpeg 90KB
7a52c96003c6406895d7a266f1d50922.jpeg 89KB
45ac6fd2f5d703cbf369b45ff6d6da7f.jpeg 89KB
1532375c60fbbae194e5653c595fa71a.jpeg 88KB
4fd5a6c883c387269eacaad67fdc512b.jpeg 88KB
ed101c15ab2dc2d914f98eac60edffd2.jpeg 87KB
c83c2cab61138f2c76dd1089edbaf0a4.jpeg 83KB
36cd07c06a28499bddbaac7ac4358155.jpeg 82KB
2bf3a5c3c7c887973c80e6c6c66bd275.jpeg 79KB
722f71cb070574248da0c7d117166357.jpeg 79KB
e02637943e5db2db227b6015074d8000.jpeg 78KB
88c2357762038663274b1a211f78cdcf.jpeg 78KB
a13827e53f06ae41e6af13f87233a525.jpeg 77KB
35f25010c9fc598b41457197616b166f.jpeg 75KB
3e3c0a06897af0fcd80e32ca6c1004be.jpeg 74KB
12ac3460982c73816134bbe13cf297cd.jpeg 74KB
77aef6fd850f215542b42e13aeac0c98.jpeg 73KB
42114158d63fc87e41c563ce2c740448.jpeg 72KB
dd0448d2754f99fc80184e8a2eef7132.jpeg 70KB
0567516d0fb2f5ab2c97e024ab71ea2b.jpeg 70KB
d3b8ca7588ddf3b3de62a827898a9b09.jpeg 67KB
edec0282900ae1e98727dce1ea4068a3.jpeg 66KB
054210a7a3a455960821e26d011c3ecc.jpeg 66KB
2202e00f9f69885790801d736c362e7e.jpeg 65KB
2335aef27a8e6f834e7b6a09780460b3.jpeg 65KB
003d470879f7e3b792c7fae8acf5bb08.jpeg 64KB
aaaad2f817d4f0b39004f149970974f2.jpeg 63KB
5540d5fcb3adb87124f8051d88512911.jpeg 63KB
6eebd96105a2eb86d946eacd37a5db6c.jpeg 62KB
215f8ce5c0d7e26877e36c117944fb83 (1).jpeg 62KB
65e2d570ab9a9b1f66e50400533c6bc0.jpeg 61KB
7bede10a1772fec0e00b1e935b5700b4.jpeg 61KB
87311dba142bdeecceb2cf2766b7a4e5.jpeg 61KB
b95f8d63c82ca35464c77b1ff320a237.jpeg 60KB
2fc067b920a85568ba4810ab360f4939.jpeg 60KB
1e7661d91685d58870529a6dddb22a44.jpeg 60KB
411609c18759e9dbb3961359873a9aaf.jpeg 58KB
006b945a0a12053d6c7e3b7fe61ea4e8.jpeg 57KB
2.jpeg 57KB
94f1a8ed1fa3219a8e0ea792ea921111.jpeg 56KB
aeda0f951c6454bb2a78aaa1c1764742.jpeg 56KB
9ae758901d82a63e799fb87667f33ae2.jpeg 55KB
96e9319a578d08ed47056d432e7d05dd.jpeg 55KB
c2e4308535392d0bcd02457779e21486.jpeg 55KB
9b3a3c3e5567219883f7a610751451de.jpeg 54KB
8b09a1059e113320eca4c17d3b81a0f4.jpeg 52KB
f91a43cc25227d211fa173f4057dd940.jpeg 52KB
21a7596e6fb4ea92cedd4ce2fc7d2edd.jpeg 51KB
d64bdbd0f4c446af62a9dbeb1ee72c7e.jpeg 50KB
68745da3cbb070394edbbe7c3e14c06d.jpeg 50KB
f0ea9726d86077d53e3153146da94b22.jpeg 49KB
4f0a2f6e447bcc188ec555f3a6b314fc.jpeg 49KB
8984036404fb67bfd87407b317a0b70b.jpeg 47KB
d7b0fdf95c822e139b7828f7b6abcfa4.jpeg 46KB
3124b5349637c8f677f5dde444f56222.jpeg 46KB
cdb7c9c23a993691c23c313d8d71c64c.jpeg 46KB
37eb599c6fe0488c636988aced47e912.jpeg 46KB
6eb77db1e37c460992a744ba4fe2ed53.jpeg 45KB
7055b26ead132c5e6ad6b75c117aa606.jpeg 44KB
3c4ae6c2e81cf0f37bdf79f88a8701c1.jpeg 44KB
5d56142d81d3fdc5ef6ce42788922b32.jpeg 44KB
9d5d9ff5ff222b10bb4abc772d294259.jpeg 43KB
dad1500890b35ac73f554d9c2050444f.jpeg 43KB
共 337 条
- 1
- 2
- 3
- 4
单黎明
- 粉丝: 16
- 资源: 4
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
- 3
- 4
- 5
- 6
前往页