import random
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torchtext import data
from torchtext import datasets

from modules import LSTM
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize='spacy', include_lengths=True)
LABEL = data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state=random.seed(SEED))
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data,
                 max_size=MAX_VOCAB_SIZE,
                 vectors="glove.6B.100d",
                 unk_init=torch.Tensor.normal_)
LABEL.build_vocab(train_data)
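# Illustrative vocabulary checks (a sketch, safe to delete):
# print(len(TEXT.vocab))      # MAX_VOCAB_SIZE + 2, for the <unk> and <pad> specials
# print(TEXT.vocab.itos[:5])  # most frequent tokens first
# print(LABEL.vocab.stoi)     # e.g. {'neg': 0, 'pos': 1}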
BATCH_SIZE = 32
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=BATCH_SIZE,
    sort_within_batch=True)
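# With include_lengths=True, each batch.text is a (token_ids, lengths) pair;
# token_ids has shape (seq_len, batch_size) since Field defaults to
# batch_first=False. Illustrative peek at one batch (safe to delete):
# tokens, lengths = next(iter(train_iterator)).text
# print(tokens.shape, lengths.shape)  # e.g. torch.Size([T, 32]), torch.Size([32])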
class RNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 n_layers, dropout, pad_idx):
        super().__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        # Two unidirectional custom LSTMs emulate a bidirectional layer:
        # rnn1 reads the sequence forwards, rnn2 reads it backwards, and the
        # classifier sees the concatenation of their final hidden states.
        self.rnn1 = LSTM(embedding_dim, hidden_dim, layers=n_layers, sequences=False)
        self.rnn2 = LSTM(embedding_dim, hidden_dim, layers=n_layers, sequences=False)
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text: (seq_len, batch_size) -> embedded: (seq_len, batch_size, emb_dim)
        embedded = self.dropout(self.embedding(text))
        # Time-reversed copy of the batch for the backward-direction LSTM
        reversed_embedded = torch.flip(embedded, dims=[0])
        # Zero initial (h, c) states, one pair per layer
        zeros = torch.zeros(embedded.size(1), self.hidden_dim, device=text.device)
        initial_states = [(zeros, zeros)] * self.n_layers
        # The custom LSTM expects batch-first input: (batch, seq_len, emb_dim)
        input1 = embedded.transpose(0, 1)
        input2 = reversed_embedded.transpose(0, 1)
        out1, _ = self.rnn1(input1, initial_states)
        out2, _ = self.rnn2(input2, initial_states)
        # Concatenate the two final hidden states: (batch, hidden_dim * 2)
        hidden = self.dropout(torch.cat((out1, out2), dim=1))
        return self.fc(hidden)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 1
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = RNN(INPUT_DIM,
            EMBEDDING_DIM,
            HIDDEN_DIM,
            OUTPUT_DIM,
            N_LAYERS,
            DROPOUT,
            PAD_IDX)
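# Minimal forward-pass shape check (a sketch; assumes the custom modules.LSTM
# returns (batch, hidden_dim) final states as forward() relies on; run it
# after model.to(device) below, since the initial states follow the input's device):
# with torch.no_grad():
#     dummy = torch.randint(0, INPUT_DIM, (50, BATCH_SIZE)).to(device)
#     print(model(dummy).shape)  # expected: torch.Size([BATCH_SIZE, 1])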
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
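# Report the trainable-parameter count (the helper above is otherwise unused):
print(f'The model has {count_parameters(model):,} trainable parameters')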
# Load the pre-trained GloVe vectors into the embedding layer (the original
# fetched the vectors but never copied them into the model)
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
# Zero the <unk> and <pad> rows so they carry no pre-trained signal
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
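# BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy, which is why
# forward() returns raw logits and binary_accuracy below applies torch.sigmoid
# itself. Tiny numeric illustration (safe to delete):
# criterion(torch.tensor([0.0]), torch.tensor([1.0]))  # -> ln(2) ≈ 0.6931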
model = model.to(device)
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8.
    """
    # round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()  # convert into float for division
    acc = correct.sum() / len(correct)
    return acc
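# Quick self-check (illustrative): sigmoid(2.0) and sigmoid(1.0) round to 1,
# sigmoid(-3.0) rounds to 0, so two of the three predictions match the targets:
# binary_accuracy(torch.tensor([2.0, -3.0, 1.0]),
#                 torch.tensor([1.0, 0.0, 0.0]))  # -> tensor(0.6667)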
def train(model, iterator, optimizer, criterion):
    model.train()  # enable dropout during training
    epoch_loss = 0
    epoch_acc = 0
    for batch in iterator:
        optimizer.zero_grad()
        text, text_lengths = batch.text
        predictions = model(text.to(device)).squeeze(1).to('cpu')
        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
    model.eval()  # disable dropout for evaluation
    epoch_loss = 0
    epoch_acc = 0
    with torch.no_grad():  # no gradients needed; also removes the stray optimizer.zero_grad() calls
        for batch in iterator:
            text, text_lengths = batch.text
            predictions = model(text.to(device)).squeeze(1).to('cpu')
            loss = criterion(predictions, batch.label)
            acc = binary_accuracy(predictions, batch.label)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs
N_EPOCHS = 40
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'LSTM.pt')
    print(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc * 100:.2f}%')
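# Restore the best checkpoint and score it on the held-out test set
# (a standard final step; test_iterator is otherwise unused):
model.load_state_dict(torch.load('LSTM.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}%')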