# Vehicle Detection Project
This project uses the **YOLOv3** model to detect vehicles with Keras.
Use **Convert_Yolo_model_to_keras.ipynb** to convert the original darknet-based YOLO model to Keras.
## Import Packages
```python
import io
import os
from collections import defaultdict
import colorsys
import random
import matplotlib.image as mpimg
import cv2
import numpy as np
import glob
import time
from keras.models import Model
from keras.utils.vis_utils import plot_model as plot
from keras.models import load_model
import matplotlib.pyplot as plt
%matplotlib inline
```
Using TensorFlow backend.
## Function
### intersection-over-union Calculation
Intersection over Union(IOU) is an evaluation metric used to measure the accuracy of an object detector on a particular dataset.
Any algorithm that provides predicted bounding boxes as output can be evaluated using IoU.
In order to apply Intersection over Union to evaluate an (arbitrary) object detector we need:
(1) The **ground-truth** bounding boxes (i.e., the hand labeled bounding boxes from the testing set that specify where in the image our object is).
(2) The predicted bounding boxes from our model.
As long as we have these two sets of bounding boxes we can apply Intersection over Union.
Below there is a visual example of a ground-truth bounding box(i.e.,**A**) versus a predicted bounding box(i.e.,**B**):
Predicted bounding boxes that heavily overlap with the ground-truth bounding boxes have higher scores than those with less overlap. This makes Intersection over Union an excellent metric for evaluating custom object detectors.
In this project, the IoU function is used to filter redundant predicted bounding boxes on the same object.
![](resources/interval.jpg)
```python
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x/np.min(x)*t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
```
```python
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def converxywh_to_xycoord(box):
    """Convert a (center-x, center-y, width, height) box to corner coordinates.

    Returns (x_min, x_max, y_min, y_max). Extra trailing entries in `box`
    (confidence, class scores) are ignored.
    """
    cx, cy, bw, bh = box[:4]
    half_w = bw / 2.0
    half_h = bh / 2.0
    return cx - half_w, cx + half_w, cy - half_h, cy + half_h
def area(box):
    """Area (width * height) of a box given as [x, y, w, h, ...].

    The position entries are irrelevant to the area, so they are no longer
    unpacked into unused locals as in the original.
    """
    return box[2] * box[3]
def box_iou(box1, box2):
    """Intersection-over-union of two (cx, cy, w, h, ...) boxes; used by NMS.

    Fix: returns 0.0 when the union area is zero (two degenerate boxes)
    instead of raising ZeroDivisionError.
    """
    x1_min, x1_max, y1_min, y1_max = converxywh_to_xycoord(box1)
    x2_min, x2_max, y2_min, y2_max = converxywh_to_xycoord(box2)
    intersect_w = _interval_overlap([x1_min, x1_max], [x2_min, x2_max])
    intersect_h = _interval_overlap([y1_min, y1_max], [y2_min, y2_max])
    intersect = intersect_w * intersect_h
    union = area(box1) + area(box2) - intersect
    if union == 0:
        return 0.0  # degenerate pair: no meaningful overlap ratio
    return float(intersect) / union
```
### Decode network out
The feature map of the YOLO output can be divided into (grid_w, grid_h, nb_box, 4 coordinates + 1 confidence + classes).
![](resources/network_output.jpg)
### Bounding boxes with dimension priors and location prediction
![](resources/boundingbox.jpg)
```python
def decode_netout(networkouput, anchors, nb_class, obj_threshold = 0.1,nms_threshold = 0.3):
    """Decode a raw YOLOv3 feature map into a list of candidate boxes.

    Parameters
    ----------
    networkouput : np.ndarray, shape (grid_h, grid_w, nb_box, 4 + 1 + nb_class)
        Raw network output for one scale. Copied, so the caller's array
        is left unmodified.
    anchors : np.ndarray, shape (nb_box, 2)
        Anchor (width, height) priors, in network-input pixels.
    nb_class : int
        Number of object classes.
    obj_threshold : float
        Class scores at or below this value are zeroed out.
    nms_threshold : float
        Unused in this function; kept for signature symmetry with nms_filter.

    Returns
    -------
    list of [x, y, w, h, confidence, classes]
        Center coordinates and sizes normalized by the grid / the 416-px
        network input; `classes` is the per-class score array.
    """
    netout = networkouput.copy()  # work on a copy so the input is untouched
    grid_h, grid_w, nb_box = netout.shape[:3]
    boxes = []
    # decode the output by the network
    netout[..., 4] = _sigmoid(netout[..., 4])  # objectness logits -> probabilities
    # class scores = objectness * sigmoid(class logits)
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _sigmoid(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_threshold  # zero out scores at/below threshold
    for row in range(grid_h):
        for col in range(grid_w):
            for b in range(nb_box):
                # elements 5: are the per-class scores
                classes = netout[row,col,b,5:]
                if np.sum(classes) > 0:  # keep only cells with at least one surviving class
                    # first 4 elements are x, y, w, and h
                    x, y, w, h = netout[row,col,b,:4]
                    # NOTE(review): YOLOv3 predicts bx = sigmoid(tx) + col. The extra
                    # +1 below shifts every center by one grid cell; the original
                    # author also flagged it ("why??"). Confirm against rendered
                    # output before changing.
                    x = (col + _sigmoid(x)+1) / grid_w  # center x, unit: image width
                    y = (row + _sigmoid(y)+1) / grid_h  # center y, unit: image height
                    w = anchors[b,0] * np.exp(w) / 416  # width, relative to the 416-px network input
                    h = anchors[b,1] * np.exp(h) / 416  # height, relative to the 416-px network input
                    confidence = netout[row,col,b,4]
                    classes = netout[row,col,b,5:]
                    boxes.append([x, y, w, h,confidence,classes])
    return boxes
```
### NMS(non maximum suppression)
Non-maximum suppression (NMS) for object detection filters redundant bounding boxes — sorted per class by confidence — using IoU.
```python
def nms_filter(boxes, nb_class, obj_threshold = 0.1, nms_threshold = 0.3):
    """Per-class non-maximum suppression over decoded boxes.

    Suppression zeroes class scores in place (each box is
    [x, y, w, h, confidence, classes]); the returned list keeps only boxes
    whose best surviving class score exceeds `obj_threshold`.

    Fix: the final filter used `np.argmax(box[5:])`, which only worked by
    accident because `box[5:]` is a one-element list wrapping the class-score
    array; it now indexes the score array directly.
    """
    # iterate over each class independently
    for c in range(nb_class):
        # box indices sorted by this class's score, highest first
        sorted_indices = list(reversed(np.argsort([box[5][c] for box in boxes])))
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            # already suppressed (or thresholded to 0): nothing to compare against
            if boxes[index_i][5][c] == 0:
                continue
            # suppress every lower-scored box overlapping this one too much
            for j in range(i + 1, len(sorted_indices)):
                index_j = sorted_indices[j]
                if box_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:
                    boxes[index_j][5][c] = 0
    # drop boxes whose best remaining class score is at or below the threshold
    boxes = [box for box in boxes if box[5][np.argmax(box[5])] > obj_threshold]
    return boxes
```
### Random colors
```python
def random_colors(N, bright=True):
    """Generate N visually distinct RGB colors as (r, g, b) 0-255 tuples.

    Colors are spread evenly around the HSV hue circle, then shuffled with a
    fixed seed so neighbouring class indices get uncorrelated colors while
    the palette stays identical across runs.
    """
    value = 1.0 if bright else 0.7
    hsv_triples = [(idx / N, 1, value) for idx in range(N)]
    rgb_floats = [colorsys.hsv_to_rgb(h, s, v) for h, s, v in hsv_triples]
    random.seed(1010)       # fixed seed: same palette every run
    random.shuffle(rgb_floats)  # decorrelate adjacent classes
    random.seed(None)       # restore nondeterministic seeding for other callers
    return [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in rgb_floats]
```
## YOLO Class
```python
class YOLO(object):
    def __init__(self,model_path = 'model_data/yolo.h5',anchors_path = 'model_data/yolov3_anchors.txt',\
        classes_path = 'model_data/coco_classes.txt',obj_threshold = 0.5, nms_threshold = 0.3):
        """Store paths and thresholds, and eagerly load class names and anchors.

        model_path: path to the converted Keras .h5 YOLOv3 model.
        anchors_path: text file whose first line holds comma-separated anchors.
        classes_path: text file with one class name per line.
        obj_threshold: minimum class score for a detection to be kept.
        nms_threshold: IoU above which overlapping boxes are suppressed.

        Note: reads both files at construction time via _get_class/_get_anchors.
        """
        self.model_path = model_path
        self.anchors_path = anchors_path
        self.classes_path = classes_path
        self.obj_threshold = obj_threshold
        self.nms_threshold = nms_threshold
        self.class_names = self._get_class()   # list of class-name strings
        self.anchors = self._get_anchors()     # np.ndarray of (w, h) anchor pairs
    def _get_class(self):
        """Read class names from self.classes_path: one per line, whitespace-stripped."""
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names
    def _get_anchors(self):
        """Read anchors from the first line of self.anchors_path.

        The line is comma-separated floats; they are reshaped into a
        (-1, 2) array of (width, height) anchor pairs.
        """
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
        return anchors
def load_model(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model must be a .h5 fil
yolo-vehicle-detection
1星 需积分: 11 184 浏览量
2018-09-02
20:40:34
上传
评论 1
收藏 98.08MB ZIP 举报
jwy2014
- 粉丝: 359
- 资源: 26
最新资源
- 记录了贪心,动态规划等算法基本思想与设计.zip
- 基于菲阿里基本模型,以及MACD RSI BooL 等技术指标 构建一套基于贪心算法策略的智能投顾模型.zip
- oj算法代码-贪心算法.zip
- 基于yolov8行人检测源码+模型.zip
- 公开整理-地级市-绿色专利申请、授权数据集(2000-2022年).xlsx
- 基于Transformer模型的图像质量评分模型实现源码+详细说明文档.zip
- CST电磁场仿真+线性螺旋电感+建模步骤细节和RLC端口配置+CST高级建模操作
- 大数据库实验的报告材料材料(word文档良心出品).doc
- AIS2024 valid
- 最入门的爬虫代码 python.docx
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈