import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.optim import lr_scheduler
from torchvision.utils import save_image
import torch.utils.data as Data
import argparse
import os
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='arguments')
parser.add_argument('--batch_size', type=int, default=32, help='')
parser.add_argument('--g_lr', type=float, default=0.001, help='lr of generator')
parser.add_argument('--d_lr', type=float, default=0.0001, help='lr of discriminator')
parser.add_argument('--dataset_path', type=str, default=r'C:\data\GAN\images/', help='the folder path of dataset')
parser.add_argument('--input_size', type=int, default=64 * 5, help='')
parser.add_argument('--input_channels', type=int, default=3, help='')
parser.add_argument('--gf', type=int, default=128, help='')
parser.add_argument('--df', type=int, default=128, help='')
parser.add_argument('--num_workers', type=int, default=0, help='')
parser.add_argument('--latent_dim', type=int, default=100, help='length of noise')
parser.add_argument('--beta1', type=float, default=0.5, help='')
parser.add_argument('--beta2', type=float, default=0.999, help='')
parser.add_argument('--n_epochs', type=int, default=900, help='')
os.makedirs('./models_small/', exist_ok=True)
parser.add_argument('--models_path', type=str, default='models_small/', help='')
src = './images_small/'
os.makedirs(src, exist_ok=True)
parser.add_argument('--images_path', type=str, default=src, help='Intermediate generated image')
parser.add_argument('--train', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--k_disc', type=int, help="the number of discriminator's training per batch size", default=1)
parser.add_argument('--pre_train', action='store_true', default=True, help='for train')
parser.add_argument('--g_model_path', type=str, default='./models_small/g_25.pth')
parser.add_argument('--d_model_path', type=str, default='./models_small/d_25.pth')
parser.add_argument('--n_test', type=int, default=10, help='the number of test images')
args = parser.parse_args()
# Device selection: honour the actual hardware.  The original script forced
# `cuda = True` right after detection, which made the later `.cuda()` calls
# crash on CPU-only machines; keep the detected value instead.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cuda = torch.cuda.is_available()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Images are resized so the short side equals input_size (default 320), then
# centre-cropped to 320x320 and normalised to [-1, 1] to match a Tanh
# generator output range.
dataset = datasets.ImageFolder(root=args.dataset_path,
                               transform=transforms.Compose([
                                   transforms.Resize(args.input_size),
                                   transforms.CenterCrop(320),
                                   transforms.ToTensor(),
                                   transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]
                                                            )
                               )
# NOTE(review): no shuffle=True here, so every epoch sees the images in
# folder order — probably unintended for GAN training; confirm before changing.
data_loader = Data.DataLoader(
    dataset=dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers
)
class generator(nn.Module):
    """DCGAN-style generator: latent noise -> (input_channels, 320, 320) image.

    A linear layer projects the noise to a (gf*8, init_size, init_size)
    feature map, where init_size = input_size // 16 (20 for the default 320).
    Four stride-2 transposed convolutions then upsample 16x back to
    input_size, ending in Tanh so outputs lie in [-1, 1] (matching the
    dataset's Normalize(0.5, 0.5)).
    """

    def __init__(self):
        super(generator, self).__init__()
        self.init_size = args.input_size // 16
        self.l1 = nn.Sequential(nn.Linear(args.latent_dim, (args.gf * 8) * (self.init_size ** 2)))
        self.main = nn.Sequential(
            nn.BatchNorm2d(num_features=args.gf * 8),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=args.gf * 8, out_channels=args.gf * 4, kernel_size=4, stride=2, padding=1),
            # BatchNorm widths must track the conv output channels.  The
            # original hard-coded 512/256/128 here, which only worked for the
            # default --gf 128 and silently broke for any other value.
            nn.BatchNorm2d(num_features=args.gf * 4),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=args.gf * 4, out_channels=args.gf * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=args.gf * 2),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=args.gf * 2, out_channels=args.gf, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=args.gf),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=args.gf, out_channels=args.input_channels, kernel_size=4, stride=2,
                               padding=1),
            nn.Tanh()
        )

    def forward(self, z):
        """Map a (batch, latent_dim) noise tensor to a batch of images."""
        out = self.l1(z)
        # Reshape the flat projection into the seed feature map for the convs.
        out = out.view(out.shape[0], args.gf * 8, self.init_size, self.init_size)
        out = self.main(out)
        return out
class discriminator(nn.Module):
    """DCGAN-style discriminator: (input_channels, 320, 320) image -> probability.

    Seven stride-2/valid conv stages shrink a 320x320 input down to a single
    1x1 logit, followed by Sigmoid so the output pairs with nn.BCELoss.
    """

    def __init__(self):
        super(discriminator, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(args.input_channels, args.df, 8, 2, 1, bias=False),
            # No BatchNorm on the first conv, per the usual DCGAN layout.
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main1 = nn.Sequential(
            nn.Conv2d(args.df, args.df * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.df * 2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main2 = nn.Sequential(
            nn.Conv2d(args.df * 2, args.df * 4, 6, 2, 1, bias=False),
            nn.BatchNorm2d(args.df * 4),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main3 = nn.Sequential(
            nn.Conv2d(args.df * 4, args.df * 8, 6, 2, 1, bias=False),
            nn.BatchNorm2d(args.df * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main4 = nn.Sequential(
            nn.Conv2d(args.df * 8, args.df * 8, 3, 2, 1, bias=False),
            nn.BatchNorm2d(args.df * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main5 = nn.Sequential(
            nn.Conv2d(args.df * 8, args.df * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.df * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.main6 = nn.Sequential(
            # state size. (args.df*8) x 4 x 4
            nn.Conv2d(args.df * 8, 1, 4, 1, 0, bias=False),
            # state size. 1 x 1 x 1
            nn.Sigmoid()
        )

    def forward(self, input):
        """Return a (batch, 1) tensor of real/fake probabilities.

        The original left `out1 = out.shape` debug assignments after every
        stage; they were never read and are removed here.  Spatial sizes per
        stage for the default 3x320x320 input (from the conv arithmetic):
        320 -> 158 -> 79 -> 38 -> 18 -> 9 -> 4 -> 1.
        """
        out = self.main(input)   # (df,   158, 158)
        out = self.main1(out)    # (df*2,  79,  79)
        out = self.main2(out)    # (df*4,  38,  38)
        out = self.main3(out)    # (df*8,  18,  18)
        out = self.main4(out)    # (df*8,   9,   9)
        out = self.main5(out)    # (df*8,   4,   4)
        out = self.main6(out)    # (1, 1, 1)
        # Flatten to (batch, 1) so it lines up with the BCE target tensors.
        out = out.view(out.shape[0], -1)
        return out
def weights_init_normal(m):
    """DCGAN weight initialisation, applied via ``model.apply(...)``.

    Conv weights ~ N(0, 0.02); BatchNorm scale (gamma) ~ N(1.0, 0.02) with
    zero bias.  The original drew the BatchNorm gammas from N(0, 0.02),
    which starts every normalised activation near zero (half the gammas
    negative) and stalls training; the DCGAN reference initialisation
    centres gamma at 1.0.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Instantiate the networks.  NOTE(review): these assignments rebind the class
# names `generator`/`discriminator` to instances, so the classes cannot be
# constructed a second time after this point.
generator = generator()
discriminator = discriminator()
if args.pre_train:
    # Resume from checkpoints.  --pre_train defaults to True, so the .pth
    # files at g_model_path/d_model_path must exist on first run.
    generator.load_state_dict(torch.load(args.g_model_path))
    discriminator.load_state_dict(torch.load(args.d_model_path))
else:
    # Fresh start: normal-distribution initialisation for Conv/BatchNorm layers.
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
# Separate AdamW optimisers; the discriminator uses a 10x smaller LR (see
# the --g_lr/--d_lr defaults).
optimizer_g = torch.optim.AdamW(generator.parameters(), args.g_lr, betas=(args.beta1, args.beta2))
optimizer_d = torch.optim.AdamW(discriminator.parameters(), args.d_lr, betas=(args.beta1, args.beta2))
# The learning rate decays to 0.1x of its value every 10 epochs.
lr_scheduler_Generator = lr_scheduler.StepLR(optimizer_g, step_size=10, gamma=0.1)
lr_scheduler_Discriminator = lr_scheduler.StepLR(optimizer_d, step_size=10, gamma=0.1)
# Binary cross-entropy pairs with the discriminator's Sigmoid output.
loss = nn.BCELoss()
if cuda:
    generator.cuda()
    discriminator.cuda()
    loss.cuda()
print('Starting Training Loop...')
# Both nets stay in train mode (BatchNorm uses batch statistics).
generator.train()
discriminator.train()
for epoch in range(args.n_epochs):
for i, (imgs, _) in enumerate(data_loader):
valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)
real_imgs = Variable(imgs.type(Tens
没有合适的资源?快使用搜索试试~ 我知道了~
DCGAN生成艺术图片迷宫游戏图片.zip
共14个文件
py:14个
1.该资源内容由用户上传,如若侵权请联系客服进行举报
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
版权申诉
0 下载量 79 浏览量
2023-05-13
22:38:47
上传
评论
收藏 22KB ZIP 举报
温馨提示
class generator(nn.Module): def __init__(self): super(generator, self).__init__() self.init_size = args.input_size // 16 self.l1 = nn.Sequential(nn.Linear(args.latent_dim, (args.gf * 8) * (self.init_size ** 2))) self.main = nn.Sequential( nn.BatchNorm2d(num_features=args.gf * 8), nn.ReLU(inplace=True), nn.ConvTranspose2d(in_channels=args.gf * 8, out_channels=args.gf * 4, kernel_size=4, stride=2, padding=1), nn.B
资源推荐
资源详情
资源评论
收起资源包目录
DCGAN生成艺术图片迷宫游戏图片.zip (14个子文件)
DCGAN
wgan_gp.py 8KB
utiles
change_txt_png.py 833B
find_path.py 2KB
change_png_txt.py 2KB
change_png_txt_judge.py 4KB
denoise.py 2KB
parser.py 0B
merge_mp4.py 2KB
wgan_div.py 7KB
wgan_gp_test.py 5KB
wgangp_train.py 8KB
dcgan_test320.py 5KB
dcgan_train320.py 9KB
dcgan_train64.py 9KB
共 14 条
- 1
资源评论
AI信仰者
- 粉丝: 1w+
- 资源: 143
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功