import glob
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import math
import numpy as np
import torch
import torchvision
import yaml
from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds
# Settings
# Wide print width and high precision make tensor dumps readable in training logs.
torch.set_printoptions(linewidth=320, precision=5, profile='long')
# format short g, %precision=5
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})
# prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
cv2.setNumThreads(0)
def init_seeds(seed=0):
    """Seed the Python, NumPy and (via init_torch_seeds) PyTorch RNGs.

    Args:
        seed: integer seed applied to every generator (default 0).
    """
    for seeder in (random.seed, np.random.seed, init_torch_seeds):
        seeder(seed)
def get_latest_run(search_dir='.'):
    """Return the most recently created 'last*.pt' checkpoint under search_dir.

    Searches recursively (useful to --resume from the newest run); returns ''
    when no checkpoint is found.
    """
    candidates = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    if not candidates:
        return ''
    return max(candidates, key=os.path.getctime)  # newest by creation time
def check_git_status():
    """Print a 'git pull' suggestion when the local repo is behind its remote.

    Only runs on Linux/macOS outside of Docker; silently does nothing otherwise.
    """
    if platform.system() not in ('Linux', 'Darwin'):
        return
    if os.path.isfile('/.dockerenv'):  # inside a Docker container
        return
    cmd = 'if [ -d .git ]; then git fetch && git status -uno; fi'
    out = subprocess.check_output(cmd, shell=True).decode('utf-8')
    marker = 'Your branch is behind'
    if marker in out:
        print(out[out.find(marker):out.find('\n\n')] + '\n')
def check_img_size(img_size, s=32):
    """Return img_size rounded up to the nearest multiple of stride s.

    Prints a warning when the size had to change.
    """
    stride = int(s)
    new_size = make_divisible(img_size, stride)  # ceil to stride multiple
    if new_size == img_size:
        return img_size
    print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' %
          (img_size, s, new_size))
    return new_size
def check_file(file):
    """Return file unchanged if it exists (or is ''); otherwise search the tree for it.

    Raises AssertionError when no match or multiple matches are found.
    """
    if file == '' or os.path.isfile(file):
        return file
    matches = glob.glob('./**/' + file, recursive=True)  # recursive search
    assert len(matches), 'File Not Found: %s' % file  # must exist somewhere
    assert len(matches) == 1, "Multiple files match '%s', specify exact path: %s" % (
        file, matches)  # must be unambiguous
    return matches[0]
def check_dataset(dict):
    """Verify the dataset paths in a data config dict exist; attempt download otherwise.

    Reads dict['val'] (path or list of paths) and dict['download'] (URL to a
    .zip or an arbitrary shell command). Raises Exception when validation
    paths are missing and no download entry is available.
    """
    # NOTE: parameter shadows the builtin `dict`; kept for caller compatibility.
    val, s = dict.get('val'), dict.get('download')
    if not (val and len(val)):
        return
    paths = val if isinstance(val, list) else [val]
    val = [Path(p).resolve() for p in paths]  # absolute validation paths
    missing = [str(p) for p in val if not p.exists()]
    if not missing:
        return
    print('\nWARNING: Dataset not found, nonexistent paths: %s' % missing)
    if not (s and len(s)):
        raise Exception('Dataset not found.')
    print('Downloading %s ...' % s)
    if s.startswith('http') and s.endswith('.zip'):  # direct URL to an archive
        f = Path(s).name  # local filename
        torch.hub.download_url_to_file(s, f)
        r = os.system('unzip -q %s -d ../ && rm %s' %
                      (f, f))  # unzip then remove archive
    else:  # treat as a bash script / shell command
        r = os.system(s)
    print('Dataset autodownload %s\n' % ('success' if r ==
                                         0 else 'failure'))  # report exit status
def make_divisible(x, divisor):
    """Return the smallest multiple of divisor that is >= x."""
    return divisor * math.ceil(x / divisor)
def labels_to_class_weights(labels, nc=80):
    """Compute normalized inverse-frequency class weights from training labels.

    Args:
        labels: list of per-image label arrays, each row formatted [class, x, y, w, h].
        nc: number of classes (default 80, COCO).

    Returns:
        torch.Tensor of shape (nc,) summing to 1, or an empty tensor when no
        labels are loaded.
    """
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()
    labels = np.concatenate(labels, 0)  # e.g. labels.shape = (866643, 5) for COCO
    # np.int was removed in NumPy >= 1.24; the builtin int is the documented replacement.
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class
    weights[weights == 0] = 1  # replace empty bins with 1 to avoid divide-by-zero
    weights = 1 / weights  # inverse frequency
    weights /= weights.sum()  # normalize to a distribution
    return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=None):
    """Compute a sampling weight for each image from its class contents.

    Args:
        labels: list of per-image label arrays, each row [class, x, y, w, h].
        nc: number of classes.
        class_weights: array-like of shape (nc,); defaults to uniform ones.
            (Original default was a module-load-time np.ones(80), which broke
            for nc != 80 and is a mutable-default antipattern.)

    Returns:
        np.ndarray of shape (len(labels),): per-image weights, e.g. for
        random.choices image sampling.
    """
    if class_weights is None:
        class_weights = np.ones(nc)  # uniform default, sized to nc
    # np.int was removed in NumPy >= 1.24; the builtin int is the documented replacement.
    class_counts = np.array(
        [np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    return image_weights
def coco80_to_coco91_class():
    """Return the mapping from 80-class COCO (val2014) indices to 91-class paper IDs.

    Element i is the 1-based 91-class category ID for 80-class index i.
    Reference: https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    """
    mapping = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
        59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
        80, 81, 82, 84, 85, 86, 87, 88, 89, 90,
    ]
    return mapping
def xyxy2xywh(x):
    """Convert nx4 boxes from corner form [x1, y1, x2, y2] to center form [x, y, w, h].

    Accepts a torch.Tensor or np.ndarray and returns the same type; the input
    is not modified.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 2] = x[:, 2] - x[:, 0]  # width
    out[:, 3] = x[:, 3] - x[:, 1]  # height
    out[:, 0] = (x[:, 0] + x[:, 2]) / 2  # center x
    out[:, 1] = (x[:, 1] + x[:, 3]) / 2  # center y
    return out
def xywh2xyxy(x):
    """Convert nx4 boxes from center form [x, y, w, h] to corner form [x1, y1, x2, y2].

    Accepts a torch.Tensor or np.ndarray and returns the same type; the input
    is not modified.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top-left x
    out[:, 1] = x[:, 1] - half_h  # top-left y
    out[:, 2] = x[:, 0] + half_w  # bottom-right x
    out[:, 3] = x[:, 1] + half_h  # bottom-right y
    return out
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy coords in-place from img1_shape back to img0_shape.

    Undoes a letterbox-style resize: removes the padding, divides by the gain,
    then clips to the original image. ratio_pad, when given, is
    ((gain, ...), (pad_w, pad_h)); otherwise both are derived from the shapes.
    Returns the (mutated) coords for convenience.
    """
    if ratio_pad is not None:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]
    else:  # derive gain/pad from the two shapes
        gain = min(img1_shape[0] / img0_shape[0],
                   img1_shape[1] / img0_shape[1])  # gain = old / new
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2  # width padding
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2  # height padding
    coords[:, [0, 2]] -= pad_x  # remove x padding
    coords[:, [1, 3]] -= pad_y  # remove y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
def clip_coords(boxes, img_shape):
    """Clamp xyxy boxes in-place to the image bounds.

    Args:
        boxes: torch.Tensor of shape (n, 4) in [x1, y1, x2, y2] form.
        img_shape: (height, width) of the image.
    """
    height, width = img_shape[0], img_shape[1]
    boxes[:, 0].clamp_(0, width)   # x1
    boxes[:, 1].clamp_(0, height)  # y1
    boxes[:, 2].clamp_(0, width)   # x2
    boxes[:, 3].clamp_(0, height)  # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
# yolov5+deepsort道路流量检测系统 可视化界面 课程作业 1. 包含使用yolov5训练得的车辆和行人检测模型; 2. 包含deepsort车辆和行人跟踪模型; 3. 通过yolov5+deepsort实现视频中车辆和行人的计数; 4. 使用pyqt5实现可视化界面,显示视频并显示车辆和行人的累计数量和实时流量; 5. 代码简洁高效,功能均可以正常运行,放心使用。
资源推荐
资源详情
资源评论
收起资源包目录
yolov5+deepsort道路流量检测系统 可视化界面 课程作业.zip (86个子文件)
道路流量监测系统(yolov5+deepsort)
AIDetector_pytorch.py 2KB
weights
yolov5m.pt 41.92MB
main.py 9KB
tracker.py 3KB
utils
__init__.py 0B
google_utils.py 5KB
metrics.py 8KB
BaseDetector.py 988B
autoanchor.py 7KB
general.py 19KB
activations.py 2KB
torch_utils.py 9KB
result.mp4 2.25MB
MVI_20011.mp4 7.37MB
deep_sort
configs
deep_sort.yaml 200B
utils
evaluation.py 3KB
__init__.py 0B
draw.py 1KB
parser.py 1000B
log.py 463B
asserts.py 316B
io.py 4KB
tools.py 734B
json_logger.py 11KB
deep_sort
__init__.py 500B
sort
track.py 5KB
kalman_filter.py 8KB
__init__.py 0B
detection.py 1KB
tracker.py 5KB
iou_matching.py 3KB
preprocessing.py 2KB
nn_matching.py 5KB
__pycache__
tracker.cpython-38.pyc 5KB
kalman_filter.cpython-38.pyc 7KB
linear_assignment.cpython-36.pyc 7KB
iou_matching.cpython-36.pyc 3KB
kalman_filter.cpython-37.pyc 7KB
track.cpython-38.pyc 5KB
track.cpython-36.pyc 5KB
preprocessing.cpython-38.pyc 2KB
tracker.cpython-37.pyc 5KB
linear_assignment.cpython-38.pyc 7KB
linear_assignment.cpython-37.pyc 7KB
tracker.cpython-36.pyc 5KB
iou_matching.cpython-37.pyc 3KB
detection.cpython-38.pyc 2KB
detection.cpython-36.pyc 2KB
nn_matching.cpython-37.pyc 6KB
preprocessing.cpython-36.pyc 2KB
__init__.cpython-37.pyc 231B
iou_matching.cpython-38.pyc 3KB
nn_matching.cpython-36.pyc 6KB
preprocessing.cpython-37.pyc 2KB
detection.cpython-37.pyc 2KB
__init__.cpython-36.pyc 227B
track.cpython-37.pyc 5KB
__init__.cpython-38.pyc 182B
nn_matching.cpython-38.pyc 6KB
kalman_filter.cpython-36.pyc 7KB
linear_assignment.py 8KB
deep_sort.py 4KB
deep
__init__.py 0B
checkpoint
.gitkeep 0B
ckpt.t7 43.9MB
evaluate.py 293B
feature_extractor.py 2KB
model.py 3KB
original_model.py 3KB
train.jpg 59KB
train.py 6KB
__pycache__
model.cpython-38.pyc 3KB
feature_extractor.cpython-37.pyc 3KB
model.cpython-36.pyc 3KB
feature_extractor.cpython-38.pyc 2KB
__init__.cpython-37.pyc 231B
model.cpython-37.pyc 3KB
feature_extractor.cpython-36.pyc 3KB
__init__.cpython-36.pyc 227B
__init__.cpython-38.pyc 182B
test.py 2KB
models
__init__.py 0B
yolov5m.yaml 1KB
common.py 4KB
experimental.py 6KB
yolo.py 10KB
共 86 条
- 1
资源评论
- m0_561917752023-09-05不可以用啊,资源损坏,打不开两只程序猿2023-09-07已经退款了~ 之前上传系统出现问题,资源已经修复了,大家如果需要可以放心购买~
- 2301_774853122024-04-22非常有用的资源,可以直接使用,对我很有用,果断支持!
- 偷鱼的噬元兽2024-03-01资源使用价值高,内容详实,给了我很多新想法,感谢大佬分享~
- liushenxue2023-12-14资源不错,对我启发很大,获得了新的灵感,受益匪浅。
两只程序猿
- 粉丝: 350
- 资源: 158
下载权益
C知道特权
VIP文章
课程特权
开通VIP
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功