# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'  # dataset-format help link used in error messages
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)  # module-level logger
# Get orientation exif tag: leave `orientation` bound to the numeric EXIF tag id
# whose name is 'Orientation'; exif_size() below reads this global.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a single hash value for a list of file paths.

    The "hash" is simply the sum of the sizes (in bytes) of the files that
    exist; missing paths contribute nothing.
    """
    total = 0
    for path in files:
        if os.path.isfile(path):
            total += os.path.getsize(path)
    return total
def exif_size(img):
    """Return the EXIF-corrected size of a PIL image as (width, height).

    EXIF orientation values 6 (rotated 270°) and 8 (rotated 90°) mean the
    stored pixel data is transposed, so width and height are swapped.
    Images with no EXIF data or no Orientation tag keep their reported size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # rotation 270 or 90: width/height swapped
            s = (s[1], s[0])
    # Narrowed from a bare `except:` -- catch only what EXIF access can raise
    # (no EXIF -> _getexif() is None -> AttributeError; no Orientation tag ->
    # KeyError), so real bugs are no longer silently swallowed.
    except (AttributeError, KeyError, IndexError, TypeError):
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset plus a matching dataloader.

    Under DDP, the torch_distributed_zero_first barrier lets rank 0 scan and
    cache the dataset first so the other ranks can reuse the cache.

    Returns:
        (dataloader, dataset) tuple.
    """
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    # Worker count: capped by CPUs per process and the requested maximum;
    # single-sample batches get 0 workers (no benefit from parallel loading).
    if batch_size > 1:
        num_workers = min(os.cpu_count() // world_size, batch_size, workers)
    else:
        num_workers = 0
    use_ddp = rank != -1
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if use_ddp else None
    # Plain DataLoader when dataset attributes update during training
    # (image_weights resamples each epoch); otherwise InfiniteDataLoader
    # keeps its workers alive across epochs.
    loader_cls = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    collate = LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn
    dataloader = loader_cls(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            sampler=sampler,
                            pin_memory=True,
                            collate_fn=collate)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """DataLoader variant whose worker processes persist across epochs.

    Drop-in replacement for a vanilla DataLoader (same constructor syntax):
    the batch sampler is wrapped in a never-ending _RepeatSampler and a single
    underlying iterator is kept alive, so workers are spawned once and reused
    instead of being torn down at every epoch boundary.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader.__setattr__ forbids rebinding batch_sampler after
        # construction, so bypass it with object.__setattr__.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of the wrapped (finite) sampler, i.e. one epoch's worth.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch of batches from the persistent iterator.
        for _ in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler:
    """Sampler wrapper that repeats its underlying sampler forever.

    Args:
        sampler (Sampler): the finite sampler to cycle over indefinitely.
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Endless loop: restart the wrapped sampler each time it is exhausted.
        while True:
            for index in self.sampler:
                yield index
class LoadImages:  # for inference
    """Iterator over image files and video frames for inference.

    Accepts a glob pattern, a directory, or a single file path. Image files
    are yielded one per file; video files are decoded frame by frame. Each
    iteration returns (path, letterboxed CHW RGB array, original BGR array,
    cv2.VideoCapture handle or None).
    """

    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')
        # Partition by file suffix into images and videos
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        self.stride = stride  # model stride; letterbox pads to a multiple of this
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # open the first video up front
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0  # index of the current file in self.files
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize to self.img_size (keeps aspect ratio, pads to stride multiple)
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert: BGR to RGB, HWC to CHW, and make memory contiguous
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new cv2 capture for `path` and reset the per-video frame counter
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.fli
没有合适的资源?快使用搜索试试~ 我知道了~
YOLOv5车辆橡胶轮胎检测+训练好的模型+pyqt界面+标注好的数据集
共1468个文件
jpg:451个
xml:444个
txt:443个
1.该资源内容由用户上传,如若侵权请联系客服进行举报
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
版权申诉
5星 · 超过95%的资源 5 下载量 63 浏览量
2022-06-29
21:24:47
上传
评论 1
收藏 179.39MB RAR 举报
温馨提示
1、yolov5汽车轮胎检测,包含训练好的汽车轮胎识别权重,以及PR曲线,loss曲线等等,在汽车轮胎检测据集中训练得到的权重,类别名为tire,标签格式为txt和xml两种,分别保存在两个文件夹中 2、有pyqt界面,可检测图片、视频和调用摄像头 2、数据集和检测结果参考:https://blog.csdn.net/zhiqingAI/article/details/124230743 3、采用pytrch框架,python代码
资源推荐
资源详情
资源评论
收起资源包目录
YOLOv5车辆橡胶轮胎检测+训练好的模型+pyqt界面+标注好的数据集 (1468个子文件)
events.out.tfevents.1656334049.DESKTOP-Q196ELQ.13956.0 871KB
results.csv 35KB
Dockerfile 2KB
Dockerfile 821B
.dockerignore 4KB
screenshot.gif 4.7MB
.gitattributes 75B
.gitignore 50B
.gitignore 0B
yolov5-pyqt5.iml 573B
tutorial.ipynb 385KB
tire_145.jpg 3.34MB
tire_395.jpg 2.37MB
tire_392.jpg 2.27MB
tire_183.jpg 2.16MB
tire_331.jpg 2.05MB
tire_179.jpg 1.98MB
tire_404.jpg 1.94MB
tire_234.jpg 1.92MB
tire_292.jpg 1.88MB
tire_226.jpg 1.88MB
tire_382.jpg 1.88MB
tire_376.jpg 1.84MB
tire_169.jpg 1.84MB
tire_318.jpg 1.83MB
tire_317.jpg 1.77MB
tire_279.jpg 1.64MB
tire_326.jpg 1.63MB
tire_273.jpg 1.6MB
tire_414.jpg 1.52MB
tire_361.jpg 1.43MB
tire_400.jpg 1.29MB
tire_112.jpg 1.2MB
tire_0.jpg 1.09MB
tire_28.jpg 1.03MB
tire_104.jpg 1.01MB
tire_154.jpg 976KB
tire_64.jpg 900KB
tire_134.jpg 859KB
tire_101.jpg 838KB
tire_86.jpg 805KB
tire_96.jpg 751KB
tire_57.jpg 732KB
tire_32.jpg 672KB
tire_93.jpg 606KB
tire_56.jpg 554KB
tire_189.jpg 527KB
tire_232.jpg 516KB
tire_92.jpg 490KB
train_batch2.jpg 488KB
train_batch1.jpg 485KB
bus.jpg 476KB
train_batch0.jpg 466KB
tire_95.jpg 442KB
val_batch0_pred.jpg 436KB
tire_67.jpg 430KB
val_batch0_labels.jpg 424KB
tire_72.jpg 406KB
tire_97.jpg 388KB
tire_125.jpg 387KB
tire_4.jpg 363KB
tire_110.jpg 329KB
tire_53.jpg 310KB
tire_221.jpg 310KB
tire_41.jpg 309KB
2022-06-29-21-20-30.jpg 294KB
tire_197.jpg 292KB
tire_142.jpg 253KB
tire_90.jpg 246KB
tire_213.jpg 239KB
tire_27.jpg 234KB
tire_496.jpg 223KB
labels_correlogram.jpg 216KB
tire_250.jpg 211KB
tire_252.jpg 209KB
tire_255.jpg 209KB
tire_94.jpg 206KB
tire_121.jpg 205KB
tire_215.jpg 202KB
tire_237.jpg 198KB
tire_161.jpg 193KB
tire_315.jpg 185KB
tire_483.jpg 184KB
tire_229.jpg 183KB
tire_297.jpg 181KB
tire_175.jpg 181KB
tire_283.jpg 180KB
tire_378.jpg 180KB
tire_236.jpg 179KB
tire_249.jpg 179KB
tire_374.jpg 179KB
tire_278.jpg 178KB
tire_350.jpg 178KB
tire_334.jpg 177KB
tire_190.jpg 177KB
tire_6.jpg 175KB
tire_217.jpg 175KB
tire_219.jpg 172KB
tire_356.jpg 170KB
tire_305.jpg 170KB
共 1468 条
- 1
- 2
- 3
- 4
- 5
- 6
- 15
资源评论
- qq_371465762024-03-25超赞的资源,感谢资源主分享,大家一起进步!
- Kelvin20202023-03-07发现一个超赞的资源,赶紧学习起来,大家一起进步,支持!
- Ning__gg2024-01-28感谢大佬分享的资源,对我启发很大,给了我新的灵感。
- m0_749941212023-03-17资源不错,对我启发很大,获得了新的灵感,受益匪浅。
- 普通网友2022-08-29资源很实用,内容详细,值得借鉴的内容很多,感谢分享。
stsdddd
- 粉丝: 2w+
- 资源: 665
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- 论文(最终)_20240430235101.pdf
- 基于python编写的Keras深度学习框架开发,利用卷积神经网络CNN,快速识别图片并进行分类
- 最全空间计量实证方法(空间杜宾模型和检验以及结果解释文档).txt
- 5uonly.apk
- 蓝桥杯Python组的历年真题
- 2023-04-06-项目笔记 - 第一百十九阶段 - 4.4.2.117全局变量的作用域-117 -2024.04.30
- 2023-04-06-项目笔记 - 第一百十九阶段 - 4.4.2.117全局变量的作用域-117 -2024.04.30
- 前端开发技术实验报告:内含4四实验&实验报告
- Highlight Plus v20.0.1
- 林周瑜-论文.docx
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功