最后编辑于2020-08
基于paddle2.0alpha版本,我自己写了个小库,基于MMediting的结构,在尝试复现第一个模型的时候(DBPN, https://zhuanlan.zhihu.com/p/146688540),PSNR指标一直提升不上去 大概2点多个点,可以说是很差了。项目地址:https://github.com/Feynman1999/PDediting
主要代码:
class UPU(fluid.dygraph.Layer):
"""up-projection units"""
def __init__(self,
num_channels,
filter_size,
stride,
padding):
super(UPU, self).__init__()
self.deconv1 = DeConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
self.conv1 = ConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
self.deconv2 = DeConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
def forward(self, x1):
x2 = self.deconv1(x1)
x3 = self.conv1(x2)
x3 = x3 - x1
x4 = self.deconv2(x3)
return x4 + x2
class DPU(fluid.dygraph.Layer):
"""
down-projection units
"""
def __init__(self, num_channels, filter_size, stride, padding):
super(DPU, self).__init__()
self.conv1 = ConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
self.deconv1 = DeConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
self.conv2 = ConvBN(num_channels, num_channels, filter_size, stride, padding, norm = False, act='prelu')
def forward(self, x1):
x2 = self.conv1(x1)
x3 = self.deconv1(x2)
x3 = x3 - x1
x4 = self.conv2(x3)
return x4 + x2
class DBPN(Model):
"""DBPN network structure.
Paper:
Ref repo:
Args:
"""
def __init__(self,
in_channels=3,
out_channels=3,
n_0=256,
n_R=64,
iterations_num=10,
upscale_factor=4):
super(DBPN, self).__init__()
filter_size = upscale_factor + 4
stride = upscale_factor
padding = 2
self.iterations_num = iterations_num
self.conv1 = ConvBN(in_channels, n_0, filter_size=3, stride=1, padding=1, norm=False, act='prelu')
self.conv2 = ConvBN(n_0, n_R, filter_size=1, stride=1, padding=0, norm=False, act='prelu')
self.UPU = UPU(n_R, filter_size, stride, padding)
self.DPU = DPU(n_R, filter_size, stride, padding)
self.conv3 = ConvBN(n_R*iterations_num, out_channels, 3, 1, 1, norm=False, act=None)
def forward(self, x): # 参数个数应该和train_batch(listofdata, listoflabel)中listofdata长度相当
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
H_list = []
x = self.conv1(x)
x = self.conv2(x)
for _ in range(self.iterations_num-1):
H = self.UPU(x)
H_list.append(H)
x = self.DPU(H)
H_list.append(self.UPU(x))
x = paddle.concat(H_list, axis=1)
x = self.conv3(x)
return x
class ConvBN(fluid.dygraph.Layer):
"""docstring for Conv2D
use MSRA initializer for conv
use Normal initializer for BN
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
norm=True,
act='leaky_relu',
lrelufactor=0.02,
use_bias=True,
use_cudnn=True):
super(ConvBN, self).__init__()
self.conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
param_attr = fluid.ParamAttr(initializer=fluid.initializer.MSRAInitializer(uniform=False)),
use_cudnn=use_cudnn,
bias_attr=use_bias)
if norm:
self.bn = BatchNorm(
num_filters,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.0)),
trainable_statistics=True)
self.lrelufactor = lrelufactor
self.norm = norm
self.act = act
if self.act == 'prelu':
self.prelu = fluid.PRelu(
mode='all',
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.25)))
def forward(self, inputs):
conv = self.conv(inputs)
if self.norm:
conv = self.bn(conv)
if self.act == 'leaky_relu':
conv = fluid.layers.leaky_relu(conv, alpha=self.lrelufactor)
elif self.act == 'relu':
conv = fluid.layers.relu(conv)
elif self.act == 'prelu':
conv = self.prelu(conv)
else:
conv = conv
return conv
class DeConvBN(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=[0, 0],
outpadding=[0, 0, 0, 0],
act='leaky_relu',
norm=True,
lrelufactor=0.02,
use_bias=True):
super(DeConvBN, self).__init__()
self._deconv = Conv2DTranspose(
num_channels,
num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
param_attr = fluid.ParamAttr(initializer=fluid.initializer.MSRAInitializer(uniform=False)),
bias_attr=use_bias)
if norm:
self.bn = BatchNorm(
num_filters,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.0)),
trainable_statistics=True)
self.outpadding = outpadding
self.lrelufactor = lrelufactor
self.use_bias = use_bias
self.norm = norm
self.act = act
if self.act == 'prelu':
self.prelu = fluid.PRelu(
mode='all',
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.25)))
def forward(self, inputs):
conv = self._deconv(inputs)
conv = fluid.layers.pad2d(conv, paddings=self.outpadding, mode='reflect')
if self.norm:
conv = self.bn(conv)
if self.act == 'leaky_relu':
conv = fluid.layers.leaky_relu(conv, alpha=self.lrelufactor)
elif self.act == 'relu':
conv = fluid.layers.relu(conv)
elif self.act == 'prelu':
conv = self.prelu(conv)
else:
conv = conv
return conv
收藏
点赞
0
个赞
请登录后评论
TOP
切换版块