# Vehicle Detection Project
This project uses a **YOLOv3** model in Keras to detect vehicles.
Use **Convert_Yolo_model_to_keras.ipynb** to convert the original Darknet-based YOLO weights into a Keras model.
## Import Packages
```python
import io
import os
from collections import defaultdict
import colorsys
import random
import matplotlib.image as mpimg
import cv2
import numpy as np
import glob
import time
from keras.models import Model
from keras.utils.vis_utils import plot_model as plot
from keras.models import load_model
import matplotlib.pyplot as plt
%matplotlib inline
```
Output: `Using TensorFlow backend.`
## Functions
### Intersection-over-Union Calculation
Intersection over Union (IoU) is an evaluation metric used to measure the accuracy of an object detector on a particular dataset.
Any algorithm that outputs predicted bounding boxes can be evaluated with IoU.
To apply Intersection over Union to an (arbitrary) object detector we need:
(1) The **ground-truth** bounding boxes (i.e., the hand-labeled bounding boxes from the test set that specify where in the image the object is).
(2) The predicted bounding boxes from our model.
As long as we have these two sets of bounding boxes, we can apply Intersection over Union.
Below is a visual example of a ground-truth bounding box (**A**) versus a predicted bounding box (**B**):
Predicted bounding boxes that heavily overlap the ground-truth boxes score higher than those with less overlap, which makes IoU a good metric for evaluating custom object detectors.
In this project, the IoU function is also used inside non-maximum suppression to filter overlapping predicted boxes on the same object.
![](resources/interval.jpg)
```python
def _sigmoid(x):
    # Logistic sigmoid.
    return 1. / (1. + np.exp(-x))


def _softmax(x, axis=-1, t=-100.):
    # Softmax; very negative values are rescaled toward t to avoid exp underflow.
    x = x - np.max(x)
    if np.min(x) < t:
        x = x / np.min(x) * t
    e_x = np.exp(x)
    return e_x / e_x.sum(axis, keepdims=True)
```
```python
def _interval_overlap(interval_a, interval_b):
    # Length of the overlap between two 1-D intervals (0 if they do not overlap).
    x1, x2 = interval_a
    x3, x4 = interval_b
    if x3 < x1:
        if x4 < x1:
            return 0
        else:
            return min(x2, x4) - x1
    else:
        if x2 < x3:
            return 0
        else:
            return min(x2, x4) - x3


def converxywh_to_xycoord(box):
    """
    Convert box center (x, y) and width/height (w, h) to x_min, x_max, y_min, y_max.
    """
    x, y, w, h = box[:4]
    x_min, x_max, y_min, y_max = x - w / 2.0, x + w / 2.0, y - h / 2.0, y + h / 2.0
    return x_min, x_max, y_min, y_max


def area(box):
    """
    Box area (width * height).
    """
    x, y, w, h = box[:4]
    return w * h


def box_iou(box1, box2):
    """
    Intersection-over-union calculation used by the NMS filter.
    """
    x1_min, x1_max, y1_min, y1_max = converxywh_to_xycoord(box1)
    x2_min, x2_max, y2_min, y2_max = converxywh_to_xycoord(box2)
    intersect_w = _interval_overlap([x1_min, x1_max], [x2_min, x2_max])
    intersect_h = _interval_overlap([y1_min, y1_max], [y2_min, y2_max])
    intersect = intersect_w * intersect_h
    union = area(box1) + area(box2) - intersect
    return float(intersect) / union
```
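A quick sanity check of `box_iou` on two hand-made boxes (values chosen only for illustration; boxes use the (center_x, center_y, width, height) convention handled by `converxywh_to_xycoord`):
```python
box_a = [0.50, 0.50, 0.20, 0.20]   # hypothetical ground-truth box
box_b = [0.55, 0.50, 0.20, 0.20]   # hypothetical predicted box, shifted slightly right

print(box_iou(box_a, box_b))       # 0.6: intersection 0.15*0.20 = 0.03, union 0.04 + 0.04 - 0.03 = 0.05
```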
### Decode network output
The YOLO output feature map can be viewed as a tensor of shape (grid_h, grid_w, nb_box, 4 coordinates + 1 objectness confidence + class probabilities).
![](resources/network_output.jpg)
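As a rough sketch of that layout (assuming 3 anchor boxes per cell and COCO's 80 classes; the array below is a random stand-in for one output scale of the converted model):
```python
nb_box, nb_class = 3, 80                       # assumed: 3 anchors per cell, 80 COCO classes
raw = np.random.rand(13, 13, nb_box * (5 + nb_class)).astype(np.float32)  # stand-in for one 13x13 output scale
grid_h, grid_w = raw.shape[:2]
netout = raw.reshape(grid_h, grid_w, nb_box, 5 + nb_class)                # (grid_h, grid_w, nb_box, 4+1+classes)
print(netout.shape)                            # (13, 13, 3, 85)
```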
### Bounding boxes with dimension priors and location prediction
![](resources/boundingbox.jpg)
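For reference, the location prediction equations from the YOLOv3 paper, where σ is the sigmoid function, (c_x, c_y) is the offset of the grid cell, and (p_w, p_h) are the width and height of the anchor (dimension prior); `decode_netout` below applies this decoding, normalizing by the grid size and the 416×416 network input:

$$
b_x = \sigma(t_x) + c_x,\qquad b_y = \sigma(t_y) + c_y,\qquad b_w = p_w\,e^{t_w},\qquad b_h = p_h\,e^{t_h}
$$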
```python
def decode_netout(network_output, anchors, nb_class, obj_threshold=0.1, nms_threshold=0.3):
    """
    Decode the network output into a list of candidate bounding boxes
    [x, y, w, h, confidence, class_scores].
    """
    netout = network_output.copy()
    grid_h, grid_w, nb_box = netout.shape[:3]
    boxes = []
    # decode the raw network output
    netout[..., 4] = _sigmoid(netout[..., 4])
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _sigmoid(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_threshold
    for row in range(grid_h):
        for col in range(grid_w):
            for b in range(nb_box):
                # from the 5th element onwards are the class scores (objectness * class probability)
                classes = netout[row, col, b, 5:]
                if np.sum(classes) > 0:
                    # first 4 elements are x, y, w and h
                    x, y, w, h = netout[row, col, b, :4]
                    x = (col + _sigmoid(x) + 1) / grid_w  # center x, normalized by grid width; note the extra +1 offset (standard YOLOv3 uses col + sigmoid(x))
                    y = (row + _sigmoid(y) + 1) / grid_h  # center y, normalized by grid height
                    w = anchors[b, 0] * np.exp(w) / 416   # width, relative to the 416x416 YOLOv3 network input
                    h = anchors[b, 1] * np.exp(h) / 416   # height, relative to the 416x416 YOLOv3 network input
                    confidence = netout[row, col, b, 4]
                    classes = netout[row, col, b, 5:]
                    boxes.append([x, y, w, h, confidence, classes])
    return boxes
```
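A minimal usage sketch, reusing the dummy `netout` from the reshape example above together with some illustrative anchor priors (in the real pipeline the anchors come from `model_data/yolov3_anchors.txt` and `netout` from the converted Keras model):
```python
anchors = np.array([[116, 90], [156, 198], [373, 326]], dtype=np.float32)  # illustrative pixel anchor priors
boxes = decode_netout(netout, anchors, nb_class=80, obj_threshold=0.5)
print(len(boxes), 'candidate boxes before NMS')
```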
### NMS (non-maximum suppression)
Non-maximum suppression filters redundant bounding boxes around the same object: for each class, boxes are sorted by confidence, and any box whose IoU with a higher-scoring box exceeds a threshold is suppressed.
```python
def nms_filter(boxes, nb_class, obj_threshold=0.1, nms_threshold=0.3):
    # iterate over each class
    for c in range(nb_class):
        # sort box indices by descending class confidence
        sorted_indices = list(reversed(np.argsort([box[5][c] for box in boxes])))
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i][5][c] == 0:
                continue
            else:
                for j in range(i + 1, len(sorted_indices)):
                    index_j = sorted_indices[j]
                    # suppress lower-scoring boxes that overlap box index_i too much
                    if box_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:
                        boxes[index_j][5][c] = 0
    # remove boxes whose best class score is below obj_threshold
    boxes = [box for box in boxes if np.max(box[5]) > obj_threshold]
    return boxes
```
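Continuing the sketch above, the candidate boxes from `decode_netout` can then be filtered:
```python
boxes = nms_filter(boxes, nb_class=80, obj_threshold=0.5, nms_threshold=0.3)
print(len(boxes), 'boxes kept after NMS')
```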
### Random colors
```python
def random_colors(N, bright=True):
    """
    Generate random colors.
    To get visually distinct colors, generate them in HSV space then
    convert to RGB.
    """
    brightness = 1.0 if bright else 0.7
    hsv = [(i / N, 1, brightness) for i in range(N)]
    colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    random.seed(1010)        # Fixed seed for consistent colors across runs.
    random.shuffle(colors)   # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)        # Reset seed to default.
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    return colors
```
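For example, one RGB color per class (here assuming COCO's 80 classes):
```python
class_colors = random_colors(80)   # one (R, G, B) tuple per class, values in 0-255
print(class_colors[:3])
```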
## YOLO Class
```python
class YOLO(object):
    def __init__(self, model_path='model_data/yolo.h5', anchors_path='model_data/yolov3_anchors.txt',
                 classes_path='model_data/coco_classes.txt', obj_threshold=0.5, nms_threshold=0.3):
        self.model_path = model_path
        self.anchors_path = anchors_path
        self.classes_path = classes_path
        self.obj_threshold = obj_threshold
        self.nms_threshold = nms_threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

    def _get_class(self):
        # Read one class name per line from the classes file.
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        # Read comma-separated anchor values and reshape into (nb_anchors, 2).
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
        return anchors

    def load_model(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
        self.model = load_model(model_path)  # load the converted Keras model
```
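A minimal instantiation sketch, assuming the converted `model_data/yolo.h5` plus the anchor and class files exist at their default paths:
```python
yolo = YOLO(model_path='model_data/yolo.h5',
            anchors_path='model_data/yolov3_anchors.txt',
            classes_path='model_data/coco_classes.txt')
yolo.load_model()
print(len(yolo.class_names), 'classes,', yolo.anchors.shape[0], 'anchors')
```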