#coding=utf-8
import cv2
import numpy as np
# from matplotlib import pyplot as plt
import scipy.ndimage.filters as f
import scipy
import time
import scipy.signal as l
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras import backend as K
# Tell Keras to use TensorFlow ("tf") channels-last image ordering, i.e.
# (rows, cols, channels) — matches the (23, 23, 1) input shapes below.
# NOTE(review): set_image_dim_ordering was removed in newer Keras releases;
# the modern equivalent is K.set_image_data_format('channels_last').
K.set_image_dim_ordering('tf')
def Getmodel_tensorflow(nb_classes):
    """Build the full-size character-judgement CNN.

    Two 3x3 conv + ReLU + 2x2 max-pool stages over a 23x23 grayscale
    patch, then a 256-unit dense layer with dropout and a softmax head.

    :param nb_classes: number of output classes.
    :return: a Keras model compiled with SGD and categorical
        cross-entropy.
    """
    input_shape = (23, 23, 1)   # grayscale 23x23 patches, channels-last
    n_filters = 16              # conv filters per stage
    kernel = (3, 3)             # convolution kernel size
    pool = (2, 2)               # max-pooling window

    net = Sequential([
        Conv2D(n_filters, kernel, input_shape=input_shape),
        Activation('relu'),
        MaxPool2D(pool_size=pool),
        Conv2D(n_filters, kernel),
        Activation('relu'),
        MaxPool2D(pool_size=pool),
        Flatten(),
        Dense(256),
        Dropout(0.5),
        Activation('relu'),
        Dense(nb_classes),
        Activation('softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])
    return net
def Getmodel_tensorflow_light(nb_classes):
    """Build the lightweight character-judgement CNN.

    Smaller sibling of ``Getmodel_tensorflow``: 8 filters per conv
    stage, a wider 6x6 kernel in the second stage, a 32-unit dense
    layer (no dropout) and a softmax head.

    :param nb_classes: number of output classes.
    :return: a Keras model compiled with Adam and categorical
        cross-entropy.
    """
    shape_in = (23, 23, 1)   # grayscale 23x23 patches, channels-last
    filters = 8              # conv filters per stage

    net = Sequential([
        Conv2D(filters, (3, 3), input_shape=shape_in),
        Activation('relu'),
        MaxPool2D(pool_size=(2, 2)),
        Conv2D(filters, (6, 6)),   # second stage uses a doubled kernel
        Activation('relu'),
        MaxPool2D(pool_size=(2, 2)),
        Flatten(),
        Dense(32),
        Activation('relu'),
        Dense(nb_classes),
        Activation('softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
# Instantiate both classifier variants with 3 output classes and load
# their pretrained weights from disk (module-level side effect).
model = Getmodel_tensorflow_light(3)
model2 = Getmodel_tensorflow(3)
import os
model.load_weights("./model/char_judgement1.h5")
# model.save("./model/char_judgement1.h5")
model2.load_weights("./model/char_judgement.h5")
# model2.save("./model/char_judgement.h5")
# NOTE(review): `model` is immediately rebound to the full-size network,
# so the light model and its weights loaded above are never used.
model = model2
def get_median(data):
    """Return the median of a sequence of numbers.

    For an even-length sequence the two middle values are combined with
    floor division (``//``), matching the integer peak-interval lists
    this helper is called with.

    :param data: non-empty sequence of numbers.
    :return: the median value.
    :raises ValueError: if ``data`` is empty.
    """
    if not data:
        raise ValueError("get_median() arg is an empty sequence")
    data = sorted(data)
    size = len(data)
    if size % 2 == 0:
        # Even length: floor-average the two middle elements.
        return (data[size // 2] + data[size // 2 - 1]) // 2
    # Odd length: the single middle element.
    return data[size // 2]
import time
def searchOptimalCuttingPoint(rgb, res_map, start, width_boundingbox, interval_range):
    """Grid-search the seven cut points that best segment a plate strip.

    Every candidate places seven cells of width ``width_boundingbox``
    plus a per-candidate ``refine`` correction, with an extra gap
    (``interval_big`` + ``i``) between the 2nd and 3rd cut points, and
    is scored against the per-column classifier scores in ``res_map``.

    :param rgb: unused here; kept for interface compatibility.
    :param res_map: per-position score rows; column 2 is read as the
        leading (province) character score and column 1 as the
        non-character score — TODO confirm against the model's class order.
    :param start: leftmost column where the first cut point may begin.
    :param width_boundingbox: estimated character width in pixels.
    :param interval_range: unused here; kept for interface compatibility.
    :return: ``[score, [p1, p2, p3, p4, p5, p6, p7]]`` for the
        highest-scoring placement (last such placement on ties, as the
        stable sort preserves insertion order).
    :raises ValueError: if no placement fits inside ``res_map``.
    """
    t0 = time.time()
    length = res_map.shape[0]
    # Wider boxes are allowed a larger negative refine search range.
    refine_s = -9 if width_boundingbox > 20 else -2
    interval_big = int(width_boundingbox * 0.3)  # base gap after the 2nd cell
    score_list = []
    for zero_add in range(start, start + 50, 3):
        for i in range(-8, int(width_boundingbox) - 8):
            for refine in range(refine_s, int(width_boundingbox / 2 + 3)):
                step = width_boundingbox + refine  # refined cell width
                p1 = zero_add                      # province-character cell
                p2 = p1 + step
                p3 = p2 + width_boundingbox + interval_big + i + 1
                p4 = p3 + step
                p5 = p4 + step
                p6 = p5 + step
                p7 = p6 + step
                if p7 >= length:
                    continue
                # Favour a strong province score at p1 and weak
                # non-character scores at the remaining cut points.
                score = res_map[p1][2] * 3 - (
                    res_map[p3][1] + res_map[p4][1] + res_map[p5][1]
                    + res_map[p6][1] + res_map[p7][1]) + 7
                score_list.append([score, [p1, p2, p3, p4, p5, p6, p7]])
    if not score_list:
        # Previously this fell through to an opaque IndexError.
        raise ValueError("no cutting-point placement fits in res_map")
    score_list = sorted(score_list, key=lambda x: x[0])
    print("寻找最佳点", time.time() - t0)
    return score_list[-1]
import sys
sys.path.append('../')
from . import recognizer as cRP
from . import niblack_thresholding as nt
def refineCrop(sections, width=16):
    """Re-crop each character patch around its dominant contour.

    Each grayscale section is binarised with Niblack thresholding, the
    external contour whose centre lies closest to the section centre is
    taken as the character, and the section is re-cropped to a box of
    the given ``width`` around it (with a height fix-up when the box
    looks abnormally wide for a character).

    :param sections: iterable of single-channel (grayscale) images.
    :param width: target crop width in pixels.
    :return: list of re-cropped images, in the same order as the input.
    """
    new_sections = []
    for section in sections:
        # cv2.imshow("section¡",section)
        # cv2.blur(section,(3,3),3)
        # Geometric centre of the patch as (x, y).
        sec_center = np.array([section.shape[1]/2, section.shape[0]/2])
        binary_niblack = nt.niBlackThreshold(section, 17, -0.255)
        #imagex,
        # NOTE(review): two-value findContours return matches OpenCV >= 4;
        # OpenCV 3 returns (image, contours, hierarchy) — confirm the
        # deployed OpenCV version.
        contours, hierarchy = cv2.findContours(binary_niblack, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        boxs = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            ratio = w/float(h)
            # Keep contours that are taller than wide, at least 40% of a
            # 36-px character height, and start in the upper half.
            if ratio < 1 and h > 36*0.4 and y < 16:
                box = [x, y, w, h]
                # Store the box together with its centre point.
                boxs.append([box, np.array([x+w/2, y+h/2])])
                # cv2.rectangle(section,(x,y),(x+w,y+h),255,1)
        # print boxs
        # Squared distance of each candidate centre to the patch centre.
        dis_ = np.array([((one[1]-sec_center)**2).sum() for one in boxs])
        if len(dis_) == 0:
            # No contour qualified: fall back to the full patch.
            kernal = [0, 0, section.shape[1], section.shape[0]]
        else:
            # The contour closest to the patch centre wins.
            kernal = boxs[dis_.argmin()][0]
        center_c = (kernal[0]+kernal[2]/2, kernal[1]+kernal[3]/2)
        w_2 = int(width/2)
        h_2 = kernal[3]/2  # NOTE(review): unused
        if center_c[0] - w_2 < 0:
            # Clamp so the crop does not start left of the image edge.
            w_2 = center_c[0]
        new_box = [center_c[0] - w_2, kernal[1], width, kernal[3]]
        # print new_box[2]/float(new_box[3])
        if new_box[2]/float(new_box[3]) > 0.5:
            # print "abnormal"
            # Box unusually wide for a character: recompute half-height
            # from the width assuming a ~0.35 aspect ratio, capped at 35.
            h = int((new_box[2]/0.35)/2)
            if h > 35:
                h = 35
            new_box[1] = center_c[1] - h
            if new_box[1] < 0:
                new_box[1] = 1
            new_box[3] = h*2
        # Apply the refined crop (row slice, then column slice).
        section = section[int(new_box[1]):int(new_box[1]+new_box[3]), int(new_box[0]):int(new_box[0]+new_box[2])]
        # cv2.imshow("section",section)
        # cv2.waitKey(0)
        new_sections.append(section)
        # print new_box
    return new_sections
def slidingWindowsEval(image):
windows_size = 16;
stride = 1
height= image.shape[0]
t0 = time.time()
data_sets = []
for i in range(0,image.shape[1]-windows_size+1,stride):
data = image[0:height,i:i+windows_size]
data = cv2.resize(data,(23,23))
# cv2.imshow("image",data)
data = cv2.equalizeHist(data)
data = data.astype(np.float)/255
data= np.expand_dims(data,3)
data_sets.append(data)
res = model2.predict(np.array(data_sets))
print("分割",time.time() - t0)
pin = res
p = 1 - (res.T)[1]
p = f.gaussian_filter1d(np.array(p,dtype=np.float),3)
lmin = l.argrelmax(np.array(p),order = 3)[0]
interval = []
for i in range(len(lmin)-1):
interval.append(lmin[i+1]-lmin[i])
if(len(interval)>3):
mid = get_median(interval)
else:
return []
pin = np.array(pin)
res = searchOptimalCuttingPoint(image,
没有合适的资源?快使用搜索试试~ 我知道了~
资源推荐
资源详情
资源评论
收起资源包目录
Python车牌检测识别代码.7z (42个子文件)
lpdr.py 997B
Font
platech.ttf 14.01MB
model
plate_type.h5 147KB
char_judgement1.h5 39KB
model12.h5 69KB
char_chi_sim.h5 2.2MB
cascade.xml 348KB
cascade_lbp.xml 30KB
char_judgement.h5 295KB
ocr_plate_all_gru.h5 15.95MB
ocr_plate_all_w_rnn_2.h5 14.57MB
char_rec.h5 1.77MB
hyperlpr_py3
plateStructure.py 0B
cache.py 201B
e2emodel.py 994B
segmentation.py 9KB
config.py 128B
finemapping.py 5KB
typeDistinguish.py 2KB
pipline.py 7KB
deskew.py 3KB
colourDetection.py 3KB
recognizer.py 5KB
niblack_thresholding.py 531B
__pycache__
recognizer.cpython-37.pyc 4KB
deskew.cpython-37.pyc 3KB
__init__.cpython-37.pyc 175B
e2emodel.cpython-37.pyc 1KB
finemapping.cpython-37.pyc 3KB
finemapping_vertical.cpython-37.pyc 2KB
detect.cpython-37.pyc 2KB
typeDistinguish.cpython-37.pyc 2KB
pipline.cpython-37.pyc 5KB
segmentation.cpython-37.pyc 6KB
cache.cpython-37.pyc 478B
e2e.cpython-37.pyc 2KB
niblack_thresholding.cpython-37.pyc 671B
__init__.py 0B
detect.py 2KB
e2e.py 2KB
finemapping_vertical.py 2KB
precise.py 0B
共 42 条
- 1
资源评论
被上帝遗弃的幼仔
- 粉丝: 3
- 资源: 9
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- 基于python部署fastsam的tensorrt模型源码.zip
- 【JavaScript】基于JSP的酒店客房管理系统源【源码+lw+部署文档+讲解】
- 综合电路设计-8赛道多功能运动秒表(vivado实现代码)
- java学习心得4000字.doc
- CarControl.pcf
- opencv-基于c++实现的opencv图像处理算法之gamma校正.zip
- java实习周记25篇.doc
- MI4450-VB一款SOP8封装N-Channel场效应MOS管
- opencv-基于c++实现的opencv图像处理算法之灰度变换算法.zip
- 实现用于Landmark检测+Robot跟踪的SLAM定位导航算法源码.zip
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功