import cv2
import numpy as np
import os
import time
import parso
from SVM_Train import SVM
import SVM_Train
from args import args


class PlateRecognition():
    def __init__(self):
        self.SZ = args.Size                      # width/height of the training images
        self.MAX_WIDTH = args.MAX_WIDTH          # maximum width of the input image
        self.Min_Area = args.Min_Area            # minimum area allowed for a plate region
        self.PROVINCE_START = args.PROVINCE_START
        self.provinces = args.provinces
        self.cardtype = args.cardtype
        self.Prefecture = args.Prefecture
        self.cfg = args.Pic_size
    # Read an image file (np.fromfile + cv2.imdecode also handles paths containing non-ASCII characters)
    def __imreadex(self, filename):
        return cv2.imdecode(np.fromfile(filename, dtype=np.uint8), cv2.IMREAD_COLOR)

    # Clamp a point's coordinates so they never go negative
    def __point_limit(self, point):
        if point[0] < 0:
            point[0] = 0
        if point[1] < 0:
            point[1] = 0
    # Projection method: given a threshold and an image histogram, find the wave
    # peaks that are later used to split the plate into individual characters
    def __find_waves(self, threshold, histogram):
        up_point = -1  # rising point (start of the current peak)
        is_peak = False
        if histogram[0] > threshold:
            up_point = 0
            is_peak = True
        wave_peaks = []
        for i, x in enumerate(histogram):
            if is_peak and x < threshold:
                if i - up_point > 2:
                    is_peak = False
                    wave_peaks.append((up_point, i))
            elif not is_peak and x >= threshold:
                is_peak = True
                up_point = i
        if is_peak and up_point != -1 and i - up_point > 4:
            wave_peaks.append((up_point, i))
        return wave_peaks
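    # Illustrative example (not from the original source): with threshold=2 and
    # histogram=[0, 0, 3, 4, 5, 0, 0, 6, 7, 8, 9, 0], __find_waves returns
    # [(2, 5), (7, 11)] -- one (start, end) column band per character.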
    # Split the image at the wave peaks found above, yielding one image per character
    def __seperate_card(self, img, waves):
        part_cards = []
        for wave in waves:
            part_cards.append(img[:, wave[0]:wave[1]])
        return part_cards
    # Shrink the plate boundary to the region that actually matches the plate colour
    def __accurate_place(self, card_img_hsv, limit1, limit2, color):
        row_num, col_num = card_img_hsv.shape[:2]
        xl = col_num
        xr = 0
        yh = 0
        yl = row_num
        # col_num_limit = self.cfg["col_num_limit"]
        row_num_limit = self.cfg["row_num_limit"]
        col_num_limit = col_num * 0.8 if color != "green" else col_num * 0.5  # green plates have a colour gradient
        for i in range(row_num):
            count = 0
            for j in range(col_num):
                H = card_img_hsv.item(i, j, 0)
                S = card_img_hsv.item(i, j, 1)
                V = card_img_hsv.item(i, j, 2)
                if limit1 < H <= limit2 and 34 < S and 46 < V:
                    count += 1
            if count > col_num_limit:
                if yl > i:
                    yl = i
                if yh < i:
                    yh = i
        for j in range(col_num):
            count = 0
            for i in range(row_num):
                H = card_img_hsv.item(i, j, 0)
                S = card_img_hsv.item(i, j, 1)
                V = card_img_hsv.item(i, j, 2)
                if limit1 < H <= limit2 and 34 < S and 46 < V:
                    count += 1
            if count > row_num - row_num_limit:
                if xl > j:
                    xl = j
                if xr < j:
                    xr = j
        return xl, xr, yh, yl
    # Preprocessing
    def __preTreatment(self, car_pic):
        if isinstance(car_pic, str):
            img = self.__imreadex(car_pic)
        else:
            img = car_pic
        pic_hight, pic_width = img.shape[:2]
        if pic_width > self.MAX_WIDTH:
            resize_rate = self.MAX_WIDTH / pic_width
            img = cv2.resize(img, (self.MAX_WIDTH, int(pic_hight * resize_rate)),
                             interpolation=cv2.INTER_AREA)  # rescale the image
        # cv2.imshow('Image', img)
        '''
        # Added later in the project
        # Intended to adjust the colour/brightness of images taken under different lighting
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        dark_point = (gray < 40)
        target_array = gray[dark_point]
        datk_size = int(target_array.size / gray.size * 100)
        # datk_size is the percentage of dark pixels
        # img = cv2.addWeighted(img, 1, img, 2, 40)     # adjust brightness
        # img = cv2.addWeighted(img, 1.5, img, 0.5, 1)  # adjust contrast
        '''
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)  # sharpening kernel
        img = cv2.filter2D(img, -1, kernel=kernel)  # sharpen
        blur = self.cfg["blur"]
        # Gaussian denoising
        if blur > 0:
            img = cv2.GaussianBlur(img, (blur, blur), 0)
        oldimg = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = np.ones((20, 20), np.uint8)
        img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)  # morphological opening
        img_opening = cv2.addWeighted(img, 1, img_opening, -1, 0)  # subtract the opening from the grayscale image (top-hat)
        # Find the image edges
        ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # binarize (Otsu)
        img_edge = cv2.Canny(img_thresh, 100, 200)
        # cv2.imshow('img_edge', img_edge)
        # Use opening and closing so that the edges merge into connected blobs
        kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]), np.uint8)
        img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)  # closing
        img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)  # opening
        # cv2.imshow('img_edge2', img_edge2)
        # Find the rectangular regions formed by the edges; there may be many,
        # and the plate sits inside one of them
        try:
            image, contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        except ValueError:
            # ValueError: not enough values to unpack (expected 3, got 2)
            # cv2.findContours returns only two values in newer OpenCV versions
            contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > self.Min_Area]
        # Rule out rectangular regions that cannot be plates, one by one
        car_contours = []
        for cnt in contours:
            # Minimum-area bounding rectangle: returns (centre(x, y), (width, height), rotation angle)
            rect = cv2.minAreaRect(cnt)
            # print('width/height:', rect[1])
            area_width, area_height = rect[1]
            # Make sure width is the larger side
            if area_width < area_height:
                area_width, area_height = area_height, area_width
            wh_ratio = area_width / area_height
            # print('aspect ratio:', wh_ratio)
            # Keep only rectangles with an aspect ratio between 2 and 5.5,
            # which is the range for licence plates; discard the rest
            if wh_ratio > 2 and wh_ratio < 5.5:
                car_contours.append(rect)
                # box = cv2.boxPoints(rect)
                # box = np.int0(box)
                # Draw every candidate rectangle
                # oldimg = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
                # cv2.imshow("Test", oldimg)
        # The candidate rectangles may be tilted and need to be rectified
        # so that colour-based location can be applied afterwards
        card_imgs = []
        for rect in car_contours:
            if rect[2] > -1 and rect[2] < 1:  # force a small angle so that left/high/right/low get the correct values
                angle = 1
            else:
                angle = rect[2]
            rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle)  # enlarge the rectangle so the plate border is not cut off
            box = cv2.boxPoints(rect)
            heigth_point = right_point = [0, 0]
            left_point = low_point = [pic_width, pic_hight]
            for point in box:
                if left_point[0] > point[0]:
                    left_point = point
                if low_point[1] > point[1]:
                    low_point = point
                if heigth_point[1] < point[1]:
                    heigth_point = point
                if right_point[0] < point[0]:
                    right_point = point
        # (the listing is cut off here; the remainder of __preTreatment is not included)
Note: On top of this project's existing features (recognising the plate number from an image, the GUI, and exporting the recognition results to an Excel file), I added real-time plate recognition from an OpenCV camera feed. Unlike the traditional approach of recognising single images, combining camera capture with this project greatly improves recognition efficiency and accuracy. I also optimised the recognition functions of the original project, which further improved recognition speed. Feedback and discussion are welcome.
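
As a rough sketch of how the camera feed can be wired to the recogniser (the recognition entry point `recognize` below is a placeholder, since the full PlateRecognition interface is not shown in this listing):

# Minimal real-time capture sketch (illustrative; the recognition entry point
# `recognize` is a hypothetical name standing in for the class's actual method).
import cv2

def run_camera(recognizer, camera_index=0):
    cap = cv2.VideoCapture(camera_index)            # open the default camera
    try:
        while True:
            ok, frame = cap.read()                  # grab one BGR frame
            if not ok:
                break
            result = recognizer.recognize(frame)    # hypothetical entry point
            if result:
                print(result)
            cv2.imshow("camera", frame)             # show the live feed
            if cv2.waitKey(1) & 0xFF == ord('q'):   # press q to quit
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()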