#-*- coding: UTF-8 -*-
"""
Environment: Keras 2.0.5, Python 2.7
Model: ResNet
"""
from __future__ import division
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Input, Activation, Dense, Flatten
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from keras.utils import plot_model
import six
def _handle_dim_ordering():
    """Set the module-level axis indices used throughout this file.

    Populates the globals ROW_AXIS, COL_AXIS and CHANNEL_AXIS based on the
    Keras backend image dim ordering: 'tf' means channels-last
    (batch, rows, cols, channels); anything else is treated as
    channels-first (batch, channels, rows, cols).
    """
    global ROW_AXIS, COL_AXIS, CHANNEL_AXIS
    if K.image_dim_ordering() == 'tf':
        ROW_AXIS, COL_AXIS, CHANNEL_AXIS = 1, 2, 3
    else:
        CHANNEL_AXIS, ROW_AXIS, COL_AXIS = 1, 2, 3
def _get_block(identifier):
    """Resolve *identifier* to a block function.

    A string is looked up among this module's globals (e.g. "basic_block"
    or "bottleneck"); any non-string value is assumed to already be a
    callable block function and is returned unchanged.

    Raises:
        ValueError: if the string does not name anything in this module.
    """
    if not isinstance(identifier, six.string_types):
        return identifier
    block = globals().get(identifier)
    if not block:
        raise ValueError('Invalid {}'.format(identifier))
    return block
def _bn_relu(input):
"""
Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
    """Build a conv -> BN -> relu block (classic ResNet-v1 ordering).

    Keyword Args:
        filters, kernel_size: required Conv2D arguments.
        strides: convolution strides, default (1, 1).
        kernel_initializer: default "he_normal".
        padding: default "same".
        kernel_regularizer: default l2(1e-4).

    Returns:
        A function mapping an input tensor to the block's output tensor.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.get("strides", (1, 1))
    kernel_initializer = conv_params.get("kernel_initializer", "he_normal")
    padding = conv_params.get("padding", "same")
    kernel_regularizer = conv_params.get("kernel_regularizer", l2(1.e-4))

    def f(x):
        conv = Conv2D(filters=filters,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(x)
        return _bn_relu(conv)

    return f
def _bn_relu_conv(**conv_params):
    """Build a BN -> relu -> conv block.

    This is the improved "pre-activation" scheme proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf

    Keyword Args:
        filters, kernel_size: required Conv2D arguments.
        strides: convolution strides, default (1, 1).
        kernel_initializer: default "he_normal".
        padding: default "same".
        kernel_regularizer: default l2(1e-4).

    Returns:
        A function mapping an input tensor to the block's output tensor.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.get("strides", (1, 1))
    kernel_initializer = conv_params.get("kernel_initializer", "he_normal")
    padding = conv_params.get("padding", "same")
    kernel_regularizer = conv_params.get("kernel_regularizer", l2(1.e-4))

    def f(x):
        activation = _bn_relu(x)
        return Conv2D(filters=filters,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(activation)

    return f
def _shortcut(input, residual):
    """Merge *input* and *residual* with an element-wise sum.

    When the two tensors differ in spatial size or channel count, the
    shortcut path is projected through a strided 1x1 convolution so the
    shapes line up before adding; otherwise the identity is used.
    """
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    # Stride ratios should come out as integers if the architecture is
    # configured correctly.
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    same_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    needs_projection = stride_width > 1 or stride_height > 1 or not same_channels
    if needs_projection:
        # 1x1 conv matches the channel count and downsamples via strides.
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)
    else:
        shortcut = input
    return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""
Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides, is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3x3 + 3x3 residual block for ResNets with <= 34 layers.

    Uses the pre-activation ordering from
    http://arxiv.org/pdf/1603.05027v2.pdf

    Returns:
        A function mapping an input tensor to the block's output tensor.
    """
    def f(x):
        if is_first_block_of_first_layer:
            # The stem already ends in bn -> relu -> maxpool, so the very
            # first block starts with a plain convolution.
            conv1 = Conv2D(filters=filters,
                           kernel_size=(3, 3),
                           strides=init_strides,
                           padding="same",
                           kernel_initializer="he_normal",
                           kernel_regularizer=l2(1e-4))(x)
        else:
            conv1 = _bn_relu_conv(filters=filters,
                                  kernel_size=(3, 3),
                                  strides=init_strides)(x)
        residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
        return _shortcut(x, residual)

    return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Bottleneck (1x1 -> 3x3 -> 1x1) block for ResNets with > 34 layers.

    Uses the pre-activation ordering from
    http://arxiv.org/pdf/1603.05027v2.pdf

    Returns:
        A function mapping an input tensor to the block's output tensor;
        the final 1x1 convolution expands to filters * 4 channels.
    """
    def f(x):
        if is_first_block_of_first_layer:
            # The stem already ends in bn -> relu -> maxpool, so the very
            # first block starts with a plain convolution.
            conv_1_1 = Conv2D(filters=filters,
                              kernel_size=(1, 1),
                              strides=init_strides,
                              padding="same",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(1e-4))(x)
        else:
            conv_1_1 = _bn_relu_conv(filters=filters,
                                     kernel_size=(1, 1),
                                     strides=init_strides)(x)
        conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
        residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
        return _shortcut(x, residual)

    return f
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions):
"""
Builds a custom ResNet like architecture.
Args:
input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or `bottleneck`.The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units.At each block unit, the number of filters are doubled and the input size is halved
Returns:
The keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
# # Permute dimension order if necessary
# if K.image_dim_ordering() == 'tf':
# input_shape = (input_shape[1], input_shape[2], input_shape[0])
# Load function from str if needed.
block_fn = _get_block(block_fn)
input = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]), strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
dense = Dense(units=num_outputs, kernel_initializer="he_normal", activation="softmax")(flatten1)
model = Model(inputs=input, outputs=dense)
return model
@staticmethod
def build_resnet_18(i
没有合适的资源?快使用搜索试试~ 我知道了~
基于Keras+Tensorflow搭建,提供ResNet50神经网络的图片分类平台.zip
共83个文件
py:21个
jpg:18个
png:7个
0 下载量 55 浏览量
2024-03-27
15:36:14
上传
评论
收藏 2.26MB ZIP 举报
温馨提示
人工智能-深度学习-tensorflow
资源推荐
资源详情
资源评论
收起资源包目录
基于Keras+Tensorflow搭建,提供ResNet50神经网络的图片分类平台.zip (83个子文件)
ImageNet-Api-master
doc
main.jpg 70KB
add-class.jpg 48KB
more_net.jpg 85KB
fit_net.jpg 48KB
home.jpg 560KB
retrain.jpg 43KB
contact.jpg 32KB
app.conf 57B
ImageNetApi.py 7KB
AlexNet
AlexNet.py 1KB
Parameter.py 831B
Resnet50
resnet50_train2.py 9KB
resnet50_train.py 2KB
resnet50_predict.py 765B
utils
CfgUtil.py 355B
__pycache__
RedisUtil.cpython-35.pyc 4KB
CfgUtil.cpython-35.pyc 661B
RedisUtil.py 3KB
templates
login.html 4KB
class.html 3KB
index.html 8KB
LeNet
Keras_LeNet.py 1KB
train.py 4KB
test.py 2KB
Test.py 1KB
VGG-13
VGG-13.py 2KB
ImageNetApi_online.py 6KB
favicon.ico 1KB
VGG-16
VGG-16.py 2KB
GoogleNet
GoogleNet.py 2KB
__pycache__
Parameter.cpython-35.pyc 1KB
static
js
main.js 6KB
anime.min.js 14KB
jquery-3.3.1.min.js 85KB
jquery-3.2.1.slim.min.js 68KB
jquery-ui.js 509KB
webfonts
fa-brands-400.woff 61KB
fa-brands-400.ttf 95KB
fa-brands-400.svg 493KB
fa-solid-900.woff 45KB
fa-solid-900.eot 96KB
fa-solid-900.ttf 95KB
fa-brands-400.woff2 53KB
fa-regular-400.woff2 12KB
fa-brands-400.eot 95KB
fa-regular-400.eot 30KB
fa-regular-400.woff 14KB
fa-regular-400.svg 104KB
fa-regular-400.ttf 30KB
fa-solid-900.woff2 36KB
fa-solid-900.svg 353KB
img
ui-icons_cc0000_256x240.png 5KB
Thumbs.db 4KB
ui-icons_777777_256x240.png 7KB
ui-icons_ffffff_256x240.png 6KB
pop-bg.jpg 37KB
gallery-img-06-tn.jpg 50KB
gallery-img-04-tn.jpg 45KB
welcome-2.jpg 35KB
ui-icons_444444_256x240.png 7KB
welcome-1.jpg 30KB
underline.png 1KB
ui-icons_555555_256x240.png 7KB
gallery-img-05-tn.jpg 27KB
gallery-img-03-tn.jpg 22KB
gallery-img-01-tn.jpg 24KB
gallery-img-02-tn.jpg 9KB
team.jpg 29KB
ui-icons_777620_256x240.png 5KB
pop-bg-bak.jpg 151KB
css
bootstrap.min.css 139KB
user_form.css 2KB
jquery-ui.css 36KB
fontawesome-all.min.css 33KB
tooplate-style.css 6KB
ZFNet
ZFNet.py 1KB
Resnet_50
Resnet50.py 9KB
train.py 5KB
__pycache__
train.cpython-35.pyc 4KB
test.cpython-35.pyc 2KB
Resnet50.cpython-35.pyc 9KB
test.py 2KB
Resnet-34
Resnet-34.py 3KB
共 83 条
- 1
资源评论
博士僧小星
- 粉丝: 1883
- 资源: 5878
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功