import logging
import os
import pickle
import time
import itertools
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning import triplet_semihard_loss
import numpy as np
from .util import AttrDict
from .util import tf_config
from . import util
from . import spherical
from . import tfnp_compatibility as tfnp
from . import params
from .layers import * # !!!
from . import datasets # !!!
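# note: block, sphconv, area_weights and get_indim used below are not defined or
# imported by name in this module; they come from the wildcard import of .layers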
logger = logging.getLogger('logger')
def dup(x):
""" Return two references for input; useful when creating NNs and storing references to layers """
return [x, x]
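# dup is typically used as `net['name'], curr = dup(layer_fn(curr))`: one reference is
# stored under a name in the layer dict while the other continues through the graph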
def init_block(args, dset=None):
net = {}
if dset is None:
net['input'], curr = dup(tf.placeholder(args.dtype,
shape=(None, *get_indim(args)[1:])))
net['label'] = tf.placeholder('int64', shape=[None])
else:
# dset is tuple (iterator, init_ops); iterator returns input and label
net['input'], net['label'] = dset[0].get_next()
curr = net['input']
net['training'] = tf.placeholder('bool', shape=(), name='is_training')
return net, curr
def init_sphcnn(args):
method = args.transform_method
real = args.real_inputs
    if method == 'naive':
        fun = lambda *args, **kwargs: spherical.sph_harm_all(*args, **kwargs, real=real)
    else:
        # avoid a NameError below when an unsupported transform method is requested
        raise ValueError('unknown transform_method: {}'.format(method))
with tf.name_scope('harmonics_or_legendre'):
res = args.input_res
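        # precompute one set of harmonics (or Legendre matrices) per resolution: the grid
        # side is halved at each pooling step, so sum(args.pool_layers) + 1 sets are needed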
harmonics = [fun(res // (2**i), as_tfvar=True) for i in range(sum(args.pool_layers) + 1)]
return harmonics
def two_branch(args, convfun=sphconv, **kwargs):
""" Model, that splits input in two branches, and concatenate intermediate feature maps. """
method = args.transform_method
l_or_h = init_sphcnn(args)
net, curr = init_block(args, **kwargs)
assert tfnp.shape(curr)[-1] == 2
curr = [curr[..., 0][..., np.newaxis],
curr[..., 1][..., np.newaxis]]
# indices for legendre or harmonics
high = 0
low = 1
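    # `high` selects the harmonics at the current resolution, `low` at the next (halved)
    # resolution; both advance after every pooling layer (see the end of the loop body)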
for i, (nf, pool, concat) in enumerate(zip(args.nfilters, args.pool_layers, args.concat_branches)):
for b in [0, 1]:
name = 'conv{}_b{}'.format(i, b)
if concat and b == 0:
# top branch also receives features from bottom branch
curr[b] = tf.concat(curr, axis=-1)
if not pool:
with tf.variable_scope(name):
net[name], curr[b] = dup(block(args, convfun, net['training'], curr[b], nf,
n_filter_params=args.n_filter_params,
harmonics_or_legendre=l_or_h[high],
method=method))
else:
with tf.variable_scope(name):
# force spectral pool in first layer if spectral input
spectral_pool = True if (args.spectral_input and i == 0) else args.spectral_pool
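                    # spectral pooling (reducing resolution in the harmonic domain rather than
                    # on the grid) is handled inside block(), which is why the lower-resolution
                    # harmonics l_or_h[low] are passed alongside the current ones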
net[name], curr[b] = dup(block(args, convfun, net['training'], curr[b], nf,
n_filter_params=args.n_filter_params,
harmonics_or_legendre=l_or_h[high],
method=method,
spectral_pool=pool if spectral_pool else 0,
harmonics_or_legendre_low=l_or_h[low]))
if not spectral_pool:
# weighted avg pooling
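                        # ('wap' multiplies each cell by its area before pooling and divides the
                        #  weights back out afterwards, so the oversampled polar regions of the
                        #  equiangular grid do not dominate the average)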
if args.pool == 'wap':
curr[b] = area_weights(tf.layers.average_pooling2d(area_weights(curr[b]),
2*pool, 2*pool,
'same'),
invert=True)
elif args.pool == 'avg':
curr[b] = tf.layers.average_pooling2d(curr[b],
2*pool, 2*pool,
'same')
elif args.pool == 'max':
curr[b] = tf.layers.max_pooling2d(curr[b],
2*pool, 2*pool,
'same')
else:
                            raise ValueError('unknown pooling method: {}'.format(args.pool))
if pool:
high += 1
low += 1
# combine for final layer
curr = tf.concat(curr, axis=-1)
return sphcnn_afterconv(curr, net, args, l_or_h[high])
def sphcnn_afterconv(curr, net, args, l_or_h):
""" Part of model after convolutional layers;
should be common for different architectures. """
# normalize by area before computing the mean
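    # (sin(phi) is proportional to the area of an equiangular grid cell, so weighting by it
    #  makes the global average pooling below approximate a true spherical mean)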
with tf.name_scope('wsa'):
if args.weighted_sph_avg:
n = tfnp.shape(curr)[1]
phi, theta = util.sph_sample(n)
phi += np.diff(phi)[0]/2
curr *= np.sin(phi)[np.newaxis, np.newaxis, :, np.newaxis]
net['final_conv'] = curr
if 'complex' in args.model:
curr = tf.abs(curr)
nlin = 'relu'
else:
nlin = args.nonlin
# curr is last conv layer
with tf.name_scope('final_pool'):
net['gap'] = tf.reduce_mean(curr, axis=(1, 2))
if args.final_pool in ['max', 'all']:
net['max'] = tf.reduce_max(curr, axis=(1, 2))
if args.final_pool in ['magnitudes', 'all']:
net['final_coeffs'] = spherical.sph_harm_transform_batch(curr,
method=args.transform_method,
harmonics=l_or_h,
m0_only=False)
# use per frequency magnitudes
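            # (the energy per frequency of the harmonic expansion gives a compact,
            #  approximately rotation-invariant summary of the final feature maps)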
net['magnitudes'] = tf.contrib.layers.flatten(tf.reduce_sum(tf.square(net['final_coeffs']),
axis=(1, 3)))
net['magnitudes'] = tf.real(net['magnitudes'])
if args.final_pool != 'all':
curr = net[args.final_pool]
else:
curr = tf.concat([net['gap'], net['max'], net['magnitudes']], axis=-1)
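    # dropout is gated by the `training` placeholder through tf.cond: keep_prob is 0.5
    # while training and 1.0 at evaluation, so a single graph serves both modes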
if args.dropout:
curr = tf.nn.dropout(curr,
keep_prob=tf.cond(net['training'],
lambda: 0.5,
lambda: 1.0))
if not args.no_final_fc:
with tf.variable_scope('fc1') as scope:
net['fc1'], curr = dup(block(AttrDict({**args.__dict__,
'batch_norm': False,
'nonlin': nlin}),
tf.layers.dense, net['training'], curr, 64))
if args.dropout:
curr = tf.nn.dropout(curr,
keep_prob=tf.cond(net['training'],
lambda: 0.5,
lambda: 1.0))
for v in scope.trainable_variables():
tf.summary.histogram(v.name, v)
net['descriptor'] = curr
if args.triplet_loss:
norm_desc = tf.nn.l2_normalize(curr, dim=-1)
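        # descriptors are L2-normalized first, so the triplet margins are computed
        # on unit-norm embeddings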
# this only works w/ fixed batch size
triplet_loss = triplet_semihard_loss(tf.cast(tf.reshape(net['label'],
(args.train_bsize,)),
'int32'),
norm_desc)
# NaNs may appear if bsize is small:
        triplet_loss = tf.where(tf.is_nan(triplet_loss),
                                tf.zeros_like(triplet_loss),
                                triplet_loss)
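        # assumed, following the pattern above: expose the guarded loss on the layer dict
        net['triplet_loss'] = triplet_loss

    # assumed return value; callers such as two_branch() pass this result straight through
    return net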