#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : layers_new.py
@Time : 2023/04/11 18:02:02
@Author : qinchengboy
@Version : 1.0
@Contact : 1441210996@qq.com
@License : (C)Copyright 2021-2024
@Desc : None
'''
# here put the import lib
import tensorflow as tf
import numpy as np
from tensorflow.python.training import moving_averages
# NOTE: checkpoints usually also store a moving average of the parameters,
# averaged over several iterations; the averaged model often performs a bit
# better than the weights from the final iteration alone.
def conv2d(x, filter_size, in_filters, out_filters, strides_size, padding, name):
    """2-D convolution with He-style (fan-out) weight initialization, no bias."""
    with tf.variable_scope(name):
        # He initialization: stddev scales with the filter's fan-out.
        fan_out = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'CW',
            shape=[filter_size, filter_size, in_filters, out_filters],
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(
                stddev=np.sqrt(2.0 / fan_out)))
        stride_vec = [1, strides_size, strides_size, 1]
        return tf.nn.conv2d(x, kernel, strides=stride_vec, padding=padding)
def transposeConv2d(x, filter_size, in_filters, out_filters, strides_size, out_shape, padding, name):
    """Transposed (fractionally-strided) 2-D convolution producing `out_shape`."""
    with tf.variable_scope(name):
        fan_out = filter_size * filter_size * out_filters
        # conv2d_transpose expects the kernel as [h, w, out_channels, in_channels].
        kernel = tf.get_variable(
            'TW',
            shape=[filter_size, filter_size, out_filters, in_filters],
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(
                stddev=np.sqrt(2.0 / fan_out)))
        stride_vec = [1, strides_size, strides_size, 1]
        return tf.nn.conv2d_transpose(x, kernel,
                                      output_shape=out_shape,
                                      strides=stride_vec,
                                      padding=padding)
def dilateConv2d(x, filter_size, in_filters, out_filters, rate, padding, name):
    """Dilated (atrous) 2-D convolution with dilation factor `rate`."""
    with tf.variable_scope(name):
        fan_out = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'DW',
            shape=[filter_size, filter_size, in_filters, out_filters],
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(
                stddev=np.sqrt(2.0 / fan_out)))
        return tf.nn.atrous_conv2d(x, kernel, rate, padding=padding)
def addBias(x, features, name):
    """Add a learnable per-channel bias (initialized to 0.1) to `x`."""
    with tf.variable_scope(name):
        bias = tf.get_variable(
            'b',
            shape=[features],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.1))
        return tf.nn.bias_add(x, bias)
def swish(x, name='swish'):
    """Swish activation: x * sigmoid(x)."""
    with tf.name_scope(name):
        return x * tf.nn.sigmoid(x)
def relu(x, leakiness=0.0):
    """Leaky ReLU activation; leakiness=0 gives the standard ReLU."""
    negative_part = leakiness * x
    return tf.where(tf.less(x, 0.0), negative_part, x, name='leaky_relu')
def batchNorm(x, mode, name):
    """
    Batch normalization: gamma * (x - mean) / sqrt(var + eps) + beta.

    In 'train' mode the per-batch statistics are used and the stored moving
    averages are updated as a side effect; in any other mode the moving
    averages are used instead (standard train/inference split).

    Args:
        x: NHWC input tensor.
        mode: Python string; 'train' selects batch statistics.
        name: variable scope name.
    Returns:
        The normalized tensor, same shape as x.
    """
    # One epsilon for both branches. The original used 1e-5 during training
    # but 0.001 at inference, so train-time and test-time outputs of the
    # same layer were normalized differently.
    epsilon = 1e-5
    with tf.variable_scope(name):
        # Per-channel parameter shape (last axis of NHWC).
        params_shape = [x.get_shape()[-1]]
        beta = tf.get_variable('beta',
                               params_shape,
                               tf.float32,
                               initializer=tf.constant_initializer(0.0, tf.float32))
        gamma = tf.get_variable('gamma',
                                params_shape,
                                tf.float32,
                                initializer=tf.constant_initializer(1.0, tf.float32))
        # Batch mean/variance per channel.
        mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
        # Moving statistics used at inference time (not trained by gradients).
        moving_mean = tf.get_variable('moving_mean',
                                      params_shape, tf.float32,
                                      initializer=tf.constant_initializer(0.0, tf.float32),
                                      trainable=False)
        moving_variance = tf.get_variable('moving_variance',
                                          params_shape, tf.float32,
                                          initializer=tf.constant_initializer(1.0, tf.float32),
                                          trainable=False)
        # Exponential moving-average updates of the batch statistics.
        decay = 0.9
        train_mean = tf.assign(moving_mean,
                               moving_mean * decay + mean * (1 - decay))
        train_var = tf.assign(moving_variance,
                              moving_variance * decay + variance * (1 - decay))
        # `mode` is a plain Python string, so the branch is known at graph
        # construction time — no need for the tf.cond over a constant bool
        # the original built.
        if mode == 'train':
            # Force the moving-average updates to run with the training op.
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(x, mean, variance,
                                                 beta, gamma, epsilon,
                                                 name='batch_norm')
        return tf.nn.batch_normalization(x, moving_mean, moving_variance,
                                         beta, gamma, epsilon,
                                         name='batch_norm')
def maxPool(x, max_size, strides_size, name):
    """Max pooling with a square `max_size` window and no padding."""
    window = [1, max_size, max_size, 1]
    step = [1, strides_size, strides_size, 1]
    return tf.nn.max_pool(x, ksize=window, strides=step,
                          padding='VALID', name=name)
def resBlock(x, in_filter, out_filter,
             stride, mode, activation_fun, name, padding='same'):
    """
    Basic residual block: two 3x3 conv+BN layers plus a shortcut connection.

    The shortcut is the identity when shapes match, a strided max-pool when
    only the spatial size changes, and a 1x1 projection conv+BN when the
    channel count changes.

    Args:
        x: NHWC input tensor.
        in_filter / out_filter: input / output channel counts.
        stride: spatial stride of the first conv (and of the shortcut).
        mode: 'train' or other, forwarded to batchNorm.
        activation_fun: activation applied after each BN and after the add.
        name: variable scope name.
        padding: unused by the body (convs pad explicitly); kept for
            interface compatibility.
    """
    with tf.variable_scope(name):
        # --- shortcut branch ---
        if in_filter == out_filter:
            if stride == 1:
                shortcut = tf.identity(x)
            else:
                # Fix: tf.nn.max_pool requires 4-element NHWC ksize/strides
                # and an upper-case padding string. The original
                # max_pool(x, [stride, stride], stride, padding='same')
                # raises a ValueError at graph construction.
                shortcut = tf.nn.max_pool(x,
                                          ksize=[1, stride, stride, 1],
                                          strides=[1, stride, stride, 1],
                                          padding='SAME')
        else:
            # 1x1 projection to match channel count (and stride).
            shortcut = conv2d(x, 1, in_filter, out_filter, stride, 'SAME', 'conv2d_0')
            shortcut = batchNorm(shortcut, mode, 'BN_0')
        # --- main branch: pad -> conv -> BN -> act, twice ---
        t = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
        x = conv2d(t, 3, in_filter, out_filter, stride, 'VALID', 'conv2d_1')
        x = batchNorm(x, mode, 'BN_1')
        x = activation_fun(x)
        t = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
        x = conv2d(t, 3, out_filter, out_filter, 1, 'VALID', 'conv2d_2')
        x = batchNorm(x, mode, 'BN_2')
        # Residual addition, then the final activation.
        x = x + shortcut
        x = activation_fun(x)
        return x
def residual(x, in_filter, out_filter, stride, mode, activation_fun, name, activate_before_residual=False):
"""
残差单元模块
"""
with tf.variable_scope(name):
# 是否前置激活(取残差直连之前进行BN和ReLU)
if activate_before_residual:
with tf.variable_scope('shared_activation'):
# 先做BN和ReLU激活
x = batchNorm(x, mode, 'BN_1')
x = activation_fun(x)
# 获取残差直连
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
# 获取残差直连
orig_x = x
# 后做BN和ReLU激活
x = batchNorm(x, mode, 'BN_1')
x = activation_fun(x)
# 第1子层
with tf.variable_scope('sub_1'):
# 3x3卷积,使用输入步长,通道数(in_filter -> out_filter)
x = conv2d(x, 3, in_filter, out_filter, stride, 'SAME', 'conv2d_1')
# 第2子层
with tf.variable_scope('sub_2'):
# BN和ReLU激活
x = batchNorm(x, mode, 'BN_2')
x = activation_fun(x)
# 3x3卷积,步长为1,通道数不变(out_filter)
x = conv2d(x, 3, out_filter, out_filter, 1, 'SAME', 'conv2d_2')
# 合并残差层
with tf.variable_scope('sub_add'):
# 当通道数有变化时
if stride != 1:
# 均值池化,无补零
orig_x = tf.nn.avg_pool(orig_x, [1, stride, stride, 1], [1, stride,
# NOTE(review): the remainder of this file was web-page scraping residue
# (CSDN download-page boilerplate: file listing, download counts, upsell
# text) accidentally pasted into the source. It has been removed. Beware
# that the `residual` function above is truncated mid-statement in this
# copy — recover its tail from the original layers_new.py before use.