import os
import torch
# RunnerV2: a simple runner for full-batch training (new in this chapter)
class RunnerV2(object):
    def __init__(self, model, optimizer, metric, loss_fn):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.metric = metric
        # Track evaluation metrics over the course of training
        self.train_scores = []
        self.dev_scores = []
        # Track loss values over the course of training
        self.train_loss = []
        self.dev_loss = []

    def train(self, train_set, dev_set, **kwargs):
        # Number of training epochs; defaults to 0 if not given
        num_epochs = kwargs.get("num_epochs", 0)
        # Logging frequency (in epochs); defaults to 100 if not given
        log_epochs = kwargs.get("log_epochs", 100)
        # Model save path; defaults to "best_model.pdparams" if not given
        save_path = kwargs.get("save_path", "best_model.pdparams")
        # Optional gradient-printing hook; defaults to None if not given
        print_grads = kwargs.get("print_grads", None)
        # Best metric value seen so far
        best_score = 0
        # Train for num_epochs epochs
        for epoch in range(num_epochs):
            X, y = train_set
            # Get model predictions
            logits = self.model(X)
            # Compute the loss (e.g. cross-entropy)
            trn_loss = self.loss_fn(logits, y).item()
            self.train_loss.append(trn_loss)
            # Compute the evaluation metric
            trn_score = self.metric(logits, y).item()
            self.train_scores.append(trn_score)
            # Compute parameter gradients (the model implements its own backward)
            self.model.backward(y)
            if print_grads is not None:
                # Print the gradients of every layer
                print_grads(self.model)
            # Update model parameters
            self.optimizer.step()
            dev_score, dev_loss = self.evaluate(dev_set)
            # If the current metric is the best so far, save the model
            if dev_score > best_score:
                self.save_model(save_path)
                print(f"best accuracy performance has been updated: {best_score:.5f} --> {dev_score:.5f}")
                best_score = dev_score
            if epoch % log_epochs == 0:
                print(f"[Train] epoch: {epoch}, loss: {trn_loss}, score: {trn_score}")
                print(f"[Dev] epoch: {epoch}, loss: {dev_loss}, score: {dev_score}")

    def evaluate(self, data_set):
        X, y = data_set
        # Compute model outputs
        logits = self.model(X)
        # Compute the loss
        loss = self.loss_fn(logits, y).item()
        self.dev_loss.append(loss)
        # Compute the evaluation metric
        score = self.metric(logits, y).item()
        self.dev_scores.append(score)
        return score, loss

    def predict(self, X):
        return self.model(X)

    def save_model(self, save_path):
        torch.save(self.model.params, save_path)

    def load_model(self, model_path):
        self.model.params = torch.load(model_path)
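
# A minimal usage sketch for RunnerV2, kept as comments because the pieces it
# needs live elsewhere in the nndl package. The names `Model_MLP_L2`, `SGD`,
# `accuracy` and `cross_entropy` are placeholders for the op-style model,
# optimizer, metric and loss defined in op.py / opitimizer.py / metric.py;
# the exact constructors may differ:
#
#     model = Model_MLP_L2(input_size=2, hidden_size=5, output_size=1)
#     optimizer = SGD(init_lr=0.1, model=model)
#     runner = RunnerV2(model, optimizer, metric=accuracy, loss_fn=cross_entropy)
#     runner.train((X_train, y_train), (X_dev, y_dev),
#                  num_epochs=500, log_epochs=50, save_path="best_model.pdparams")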

class RunnerV3(object):
    def __init__(self, model, optimizer, loss_fn, metric, **kwargs):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.metric = metric  # only used to compute evaluation metrics
        # Track evaluation metrics over the course of training
        self.dev_scores = []
        # Track loss values over the course of training
        self.train_epoch_losses = []  # one loss entry per epoch
        self.train_step_losses = []   # one loss entry per step
        self.dev_losses = []
        # Best metric value seen so far
        self.best_score = 0

    def train(self, train_loader, dev_loader=None, **kwargs):
        # Switch the model to training mode
        self.model.train()
        # Number of training epochs; defaults to 0 if not given
        num_epochs = kwargs.get("num_epochs", 0)
        # Logging frequency (in steps); defaults to 100 if not given
        log_steps = kwargs.get("log_steps", 100)
        # Evaluation frequency (in steps); 0 disables evaluation
        eval_steps = kwargs.get("eval_steps", 0)
        # Model save path; defaults to "best_model.pdparams" if not given
        save_path = kwargs.get("save_path", "best_model.pdparams")
        custom_print_log = kwargs.get("custom_print_log", None)
        # Total number of training steps
        num_training_steps = num_epochs * len(train_loader)
        if eval_steps:
            if self.metric is None:
                raise RuntimeError('Error: Metric can not be None!')
            if dev_loader is None:
                raise RuntimeError('Error: dev_loader can not be None!')
        # Number of steps run so far
        global_step = 0
        # Train for num_epochs epochs
        for epoch in range(num_epochs):
            # Accumulate the training-set loss for this epoch
            total_loss = 0
            for step, data in enumerate(train_loader):
                X, y = data
                # Get model predictions
                logits = self.model(X)
                loss = self.loss_fn(logits, y)  # mean reduction by default
                # Accumulate with .item() so the computation graph is not kept alive
                total_loss += loss.item()
                # Record the loss of every step during training
                self.train_step_losses.append((global_step, loss.item()))
                if log_steps and global_step % log_steps == 0:
                    print(f"[Train] epoch: {epoch}/{num_epochs}, step: {global_step}/{num_training_steps}, loss: {loss.item():.5f}")
                # Backpropagate to compute the gradient of every parameter
                loss.backward()
                if custom_print_log:
                    custom_print_log(self)
                # Mini-batch gradient descent update
                self.optimizer.step()
                # Reset gradients to zero (torch optimizers use zero_grad)
                self.optimizer.zero_grad()
                # Decide whether to evaluate
                if eval_steps > 0 and global_step > 0 and \
                        (global_step % eval_steps == 0 or global_step == (num_training_steps - 1)):
                    dev_score, dev_loss = self.evaluate(dev_loader, global_step=global_step)
                    print(f"[Evaluate] dev score: {dev_score:.5f}, dev loss: {dev_loss:.5f}")
                    # Switch the model back to training mode
                    self.model.train()
                    # If the current metric is the best so far, save the model
                    if dev_score > self.best_score:
                        self.save_model(save_path)
                        print(f"[Evaluate] best accuracy performance has been updated: {self.best_score:.5f} --> {dev_score:.5f}")
                        self.best_score = dev_score
                global_step += 1
            # Average training loss over this epoch
            trn_loss = total_loss / len(train_loader)
            # Record the epoch-level training loss
            self.train_epoch_losses.append(trn_loss)
        print("[Train] Training done!")

    # Evaluation runs under torch.no_grad() so no gradients are computed or stored
    @torch.no_grad()
    def evaluate(self, dev_loader, **kwargs):
        assert self.metric is not None
        # Switch the model to evaluation mode
        self.model.eval()
        global_step = kwargs.get("global_step", -1)
        # Accumulate the loss over the dev set
        total_loss = 0
        # Reset the metric's internal state
        self.metric.reset()
        # Iterate over the batches of the dev set
        for batch_id, data in enumerate(dev_loader):
            X, y = data
            # Compute model outputs
            logits = self.model(X)
            # Compute the loss
            loss = self.loss_fn(logits, y).item()
            # Accumulate the loss
            total_loss += loss
            # Accumulate the metric
            self.metric.update(logits, y)
        dev_loss = total_loss / len(dev_loader)
        dev_score = self.metric.accumulate()
        # Record the dev-set loss and score
        if global_step != -1:
            self.dev_losses.append((global_step, dev_loss))
            self.dev_scores.append(dev_score)
        return dev_score, dev_loss
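
    # Remaining RunnerV3 helpers that train() relies on: a minimal sketch,
    # assuming the model is a torch.nn.Module. The state_dict-based saving
    # mirrors RunnerV2's save_model/load_model and is an assumption, not
    # part of the original listing:
    @torch.no_grad()
    def predict(self, x):
        # Run the model on a batch of inputs in evaluation mode
        self.model.eval()
        return self.model(x)

    def save_model(self, save_path):
        # Persist the model parameters (assumes a torch.nn.Module)
        torch.save(self.model.state_dict(), save_path)

    def load_model(self, model_path):
        # Restore previously saved parameters
        self.model.load_state_dict(torch.load(model_path))

# Hypothetical end-to-end usage of RunnerV3 (Accuracy is a placeholder for the
# metric class in the package's metric.py):
#
#     runner = RunnerV3(model, optimizer, loss_fn=torch.nn.CrossEntropyLoss(),
#                       metric=Accuracy())
#     runner.train(train_loader, dev_loader, num_epochs=5, log_steps=100,
#                  eval_steps=200, save_path="best_model.pdparams")
#     score, loss = runner.evaluate(dev_loader)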

# nndl package for the "Neural Networks and Deep Learning" labs (PyTorch port).
# Contents of nndl.zip (8 files):
#     nndl/
#         opitimizer.py
#         __init__.py
#         op.py
#         activation.py
#         dataset.py
#         metric.py
#         runner.py
#         tools.py