Table of Contents
- Training data
- 1. Long short-term memory networks based on recurrent neural networks
  - 1.1 Handwritten digit classification with an LSTM
  - 1.2 Text classification with an LSTM
Training data
URL: https://download.csdn.net/download/qq_34405401/12232606
1. Long short-term memory networks based on recurrent neural networks
1.1 Handwritten digit classification with an LSTM:
- One image = one sentence
- One row of pixels = one word
- Each word is converted to a vector, called a word vector (see the sketch below)
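To make the analogy concrete, here is a minimal sketch (mine, not from the original post) that reads each of an image's 28 pixel rows as one "word" of a 28-step sentence. Note that the code in this section instead flattens the whole image into a single 784-dimensional time step.

import torch

img = torch.randn(28, 28)  # one grayscale image
seq = img.view(1, 28, 28)  # [batch=1, seq_len=28 rows, input_size=28 pixels]
lstm = torch.nn.LSTM(input_size=28, hidden_size=64, batch_first=True)
out, (h_n, c_n) = lstm(seq)  # the LSTM reads the image row by row
print(out.shape)  # torch.Size([1, 28, 64])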
Loading the data:
import struct
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.utils.data
# Read the image file
def load_image_fromfile(filename):
    with open(filename, 'br') as fd:
        # Read the image header: 16 bytes = 4 int32 values
        header_buf = fd.read(16)
        # Parse the header (see the struct module in the Python standard library):
        # '>' means big-endian, 'i' means a 4-byte integer
        magic_, nums_, width_, height_ = struct.unpack('>iiii', header_buf)
        # Read the pixel data into an ndarray
        imgs_ = np.fromfile(fd, dtype=np.uint8)
        imgs_ = imgs_.reshape(nums_, height_, width_)
    return imgs_

# Read the label file
def load_label_fromfile(filename):
    with open(filename, 'br') as fd:
        header_buf = fd.read(8)  # 8 bytes = 2 int32 values
        magic, nums = struct.unpack('>ii', header_buf)
        labels_ = np.fromfile(fd, np.uint8)
    return labels_

# Load the training set
train_x = load_image_fromfile("datasets/train-images.idx3-ubyte")
train_y = load_label_fromfile("datasets/train-labels.idx1-ubyte")
train_x = train_x.astype(np.float64)
train_y = train_y.astype(np.int64)
# Load the test set
test_x = load_image_fromfile("datasets/t10k-images.idx3-ubyte")
test_y = load_label_fromfile("datasets/t10k-labels.idx1-ubyte")

# Manage the data with Torch's dataset utilities
# Convert to Tensors
x = torch.Tensor(train_x).view(train_x.shape[0], 1, train_x.shape[1], train_x.shape[2])  # N, C, H, W
y = torch.LongTensor(train_y)
t_x = torch.Tensor(test_x).view(test_x.shape[0], 1, test_x.shape[1], test_x.shape[2])  # N, C, H, W
t_y = torch.LongTensor(test_y)

# Wrap features and labels with TensorDataset
train_dataset = torch.utils.data.TensorDataset(x, y)
test_dataset = torch.utils.data.TensorDataset(t_x, t_y)

# Shuffling and batching
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=2000)  # 2000 samples per batch
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, shuffle=True, batch_size=10000)  # predict the whole test set in one batch
print(train_loader, test_loader)
<torch.utils.data.dataloader.DataLoader object at 0x000001C7C51A9908> <torch.utils.data.dataloader.DataLoader object at 0x000001C7C5618D48>
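As a quick sanity check (a sketch, not in the original post), one batch can be drawn from the loader to confirm the shapes before building the model:

batch_x, batch_y = next(iter(train_loader))
print(batch_x.shape)  # torch.Size([2000, 1, 28, 28]) -> N, C, H, W
print(batch_y.shape)  # torch.Size([2000])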
Building the LSTM network
# LSTM recurrent network ---> classifier network (Linear) ----> log_softmax over 10 classes
# Four layers: input, recurrent, classification (fully connected), output
class RNN(torch.nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        # Define the layers
        self.rnn = torch.nn.LSTM(
            input_size=28 * 28,  # size of the input at each time step
            batch_first=True,    # input format: [batch, seq_len, input_size]; with batch_first=False it is [seq_len, batch, input_size]
            hidden_size=64,      # hidden state size; determines the shape of the input-to-hidden weights (input_size x hidden_size)
            num_layers=1         # number of recurrent layers
        )
        # Classifier layer
        self.out = torch.nn.Linear(64, out_features=10)

    def forward(self, input):
        # Compute the decision output
        o, (h_n, c_n) = self.rnn(input)
        # The o above satisfies o[:, -1, :] = h_n[-1, :, :] (see the sketch below)
        o = self.out(h_n[-1, :, :])
        return o
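A small sketch (mine, not in the original) verifying the comment in forward(): for a single-layer, unidirectional LSTM, the top layer's final hidden state h_n[-1] equals the last time step of the output sequence o.

import torch

rnn = torch.nn.LSTM(input_size=28 * 28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(3, 1, 28 * 28)  # [batch=3, seq_len=1, input_size=784]
o, (h_n, c_n) = rnn(x)
print(o.shape)    # torch.Size([3, 1, 64])  -> [batch, seq_len, hidden]
print(h_n.shape)  # torch.Size([1, 3, 64])  -> [num_layers, batch, hidden]
print(torch.allclose(o[:, -1, :], h_n[-1]))  # True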
# Build the network object
net = RNN()

# Cross-entropy loss (one-hot encoding is handled automatically)
loss_ = torch.nn.CrossEntropyLoss()

# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

# Training loop
epoch = 5
for n in range(epoch):
    for step, input_data in enumerate(train_loader):
        # Get the feature and label batch
        x_, y_ = input_data
        # Forward pass
        pred = net(x_.view(-1, 1, 28 * 28))
        # Compute the loss
        loss = loss_(pred, y_)
        # Zero the gradients first, then back-propagate and update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    with torch.no_grad():
        test_pred = net(t_x.view(-1, 1, 28 * 28))
        # Convert the outputs to probabilities
        prob = torch.nn.functional.softmax(test_pred, dim=1)
        class_pre = torch.argmax(prob, dim=1)
        acc = (class_pre == t_y).float().mean()
    print(F"Epoch: {n}/{epoch},\tTest accuracy: {acc * 100:5.2f}%")
Epoch: 0/5,	Test accuracy: 71.75%
Epoch: 1/5,	Test accuracy: 83.05%
Epoch: 2/5,	Test accuracy: 86.82%
Epoch: 3/5,	Test accuracy: 88.60%
Epoch: 4/5,	Test accuracy: 89.63%
1.2 Text classification with an LSTM
import json
from gensim import models
from gensim import utils
import torch
import torch.nn as nn
import time
from collections import Counter
import numpy as np
from torch.utils.data import DataLoader, TensorDataset
with open("cveresult_allcwe.json", 'r', encoding='utf8') as f:
    line = f.readline()
    json_text = json.loads(line)
cve_descriptions = []  # training features
cwe_ids = []           # training labels
total_word_list = []   # global word list
max_len = 0
for i in range(len(json_text)):
    cve_description = json_text[str(i)]["description"]  # description text
    cwe_id = int(json_text[str(i)]["cweid"], 10)        # CWE id, parsed as base-10
    # Tokenize the sentence into a word list: deacc=True strips accents,
    # min_len=0 and max_len=100 bound the token length
    single_cve = utils.simple_preprocess(cve_description, True, 0, 100)
    max_len = max(max_len, len(single_cve))  # track the longest sentence (in words)
    cve_descriptions.append(single_cve)      # one word list per record
    cwe_ids.append(cwe_id)                   # one label per record
    total_word_list += single_cve            # global word list
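A toy illustration (hypothetical sentence, not from the dataset) of what simple_preprocess returns with these arguments:

from gensim import utils

sentence = "Buffer overflow in X 1.0 allows remote attackers to execute code."
print(utils.simple_preprocess(sentence, True, 0, 100))
# e.g. ['buffer', 'overflow', 'in', 'x', 'allows', 'remote', 'attackers', 'to', 'execute', 'code']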
len(cve_descriptions), len(cwe_ids)
(51850, 51850)
cwe_ids[0]
119
tag_size = max(cwe_ids)  # largest CWE id, used as the number of label classes
labels = np.array(cwe_ids)  # convert the labels to an ndarray

# Vocabulary table
word_count = Counter(total_word_list)  # count word frequencies
sorted_words = word_count.most_common(len(total_word_list))  # sort by frequency; passing the full length keeps every word
vocab_to_int = {w: i + 1 for i, (w, c) in enumerate(sorted_words)}  # number the words by frequency, starting from 1
tag_size
943
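A tiny example (toy data of mine) of how vocab_to_int is built, and why numbering starts at 1: index 0 is reserved for the padding value used later.

from collections import Counter

words = ["buffer", "overflow", "buffer", "remote", "buffer"]
counts = Counter(words).most_common()  # [('buffer', 3), ('overflow', 1), ('remote', 1)]
vocab = {w: i + 1 for i, (w, c) in enumerate(counts)}
print(vocab)  # {'buffer': 1, 'overflow': 2, 'remote': 3}; index 0 stays free for padding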
# Sentences -> integer codes (each word replaced by its vocabulary index)
cve_int = []
for cve in cve_descriptions:  # each record was already tokenized into a word list above
    temp = [vocab_to_int[w] for w in cve]  # convert the word list to an integer list
    cve_int.append(temp)
# An LSTM can handle a different sequence length on every step, but the DataLoader
# requires equal lengths, so the sequences must be padded (here with zeros)
def padding(cve_int, seq_length):
    """
    cve_int: the data to align
    seq_length: the target length; computed beforehand (the maximum sentence length)
    """
    features = np.zeros((len(cve_int), seq_length), dtype=int)
    for i, cve in enumerate(cve_int):
        cve_len = len(cve)
        if cve_len <= seq_length:
            zeroes = list(np.zeros(seq_length - cve_len))
            new = zeroes + cve
        else:
            new = cve[0:seq_length]
        features[i, :] = np.array(new)
    return features
cve_int = padding(cve_int, max_len)
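On toy data, padding() left-pads short sequences with zeros and truncates long ones, for example:

example = padding([[5, 7], [1, 2, 3, 4]], 4)
print(example)
# [[0 0 5 7]
#  [1 2 3 4]]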
# Split into training / validation / test sets
split_frac = 0.8  # 80% for training; the remaining 20% is split evenly between validation and test
len_feat = len(cve_int)
train_x = cve_int[0:int(split_frac * len_feat)]
train_y = labels[0:int(split_frac * len_feat)]
print(train_x.shape)
remaining_x = cve_int[int(split_frac * len_feat):]
remaining_y = labels[int(split_frac * len_feat):]
valid_x = remaining_x[0:int(len(remaining_x) * 0.5)]
valid_y = remaining_y[0:int(len(remaining_y) * 0.5)]
test_x = remaining_x[int(len(remaining_x) * 0.5):]
test_y = remaining_y[int(len(remaining_y) * 0.5):]
print(valid_x.shape)
# create tensor dataset
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y.astype(np.int64)))
valid_data = TensorDataset(torch.from_numpy(valid_x), torch.from_numpy(valid_y.astype(np.int64)))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y.astype(np.int64)))
(41480, 647)
(5185, 647)
# dataloaders
batch_size = 32  # batch size for the shuffled (stochastic) training samples
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)

vocab_size = len(vocab_to_int) + 1  # vocabulary size (+1 for the padding index 0)
output_size = tag_size + 1          # number of output labels
embedding_dim = 100                 # word-vector dimension: dense, real-valued components (word embedding)
hidden_dim = 256                    # hidden state dimension
num_layers = 2
Number of sentences = batch size; number of words = sequence length; word vector = input feature size
- torch.nn.Embedding (word embedding: replaces one-hot word encodings with dense vectors)
- [sentences, words, word as one-hot vector] --Embedding--> [batch, seq_len, input_size] (see the shape sketch below)
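A shape sketch (toy sizes of mine) of the transformation nn.Embedding performs, turning a batch of word indices into a batch of dense vectors:

import torch

emb = torch.nn.Embedding(num_embeddings=100, embedding_dim=8)  # toy vocab of 100 words, 8-dim vectors
ids = torch.randint(0, 100, (32, 647))  # [batch, seq_len] of word indices
vecs = emb(ids)
print(vecs.shape)  # torch.Size([32, 647, 8]) -> [batch, seq_len, input_size]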
# hidden_dim: output dimension
# embedding_dim: input dimension
# word vectors could also be built with a tool such as word2vec
class CveLSTM(nn.Module):
    """An LSTM model for text classification."""
    def __init__(self):
        super(CveLSTM, self).__init__()
        # Map word indices to vectors; the first argument is the number of distinct
        # indices, the second determines the vector length
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers, dropout=0.5, batch_first=True)
        # dropout
        self.dropout = nn.Dropout(0.5)
        # fully connected
        self.linear = nn.Linear(hidden_dim, output_size)
        # softmax
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        embeds = self.embedding(x)
        out, (h_n, c_n) = self.lstm(embeds)
        # stack up lstm outputs
        out = out.contiguous().view(-1, hidden_dim)
        # dropout
        out = self.dropout(out)
        # fully connected
        out = self.linear(out)
        # softmax
        out = self.softmax(out)
        # reshape to be batch_size first
        out = out.view(batch_size, -1, output_size)
        out = out[:, -1, :]
        return out
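One caveat (my observation, not in the original post): forward() reshapes with the global batch_size, so any batch of a different size, such as the last incomplete batch of an epoch, would make view() fail. A minimal sketch of a safer variant derives the batch dimension from the input itself; alternatively, building the DataLoaders with drop_last=True avoids partial batches entirely.

# Hypothetical variant of CveLSTM.forward: use x.size(0) (the actual batch
# size) instead of the global batch_size so partial batches also work.
def forward(self, x):
    embeds = self.embedding(x)
    out, (h_n, c_n) = self.lstm(embeds)
    out = out.contiguous().view(-1, hidden_dim)
    out = self.dropout(out)
    out = self.softmax(self.linear(out))
    out = out.view(x.size(0), -1, output_size)  # batch dimension taken from the input
    return out[:, -1, :]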
print("模型构建完毕")
net = CveLSTM()
print("开始训练.......")
模型构建完毕
开始训练.......
lr = 0.002  # learning rate
criterion = nn.NLLLoss()  # loss function (expects log-probabilities)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)  # optimizer

# Training parameters
epochs = 5
counter = 0
print_every = 1  # 100
clip = 5  # gradient clipping threshold

for epoch in range(epochs):  # loop over epochs
    print(F"Iteration {epoch}")
    for inputs, labels in train_loader:  # random training batches (the DataLoader could be replaced by a custom sampling scheme)
        counter += 1
        # zero accumulated gradients
        net.zero_grad()
        inputs = inputs.type(torch.LongTensor)
        output = net(inputs)
        # print(output.size())
        # print(labels.size())
        # loss, back-propagation
        loss = criterion(output.squeeze(), labels)
        print("Backward pass!")
        # loss.backward(retain_graph=True)
        loss.backward()
        # prevent the exploding-gradient problem
        # nn.utils.clip_grad_norm_(net.parameters(), clip)
        optimizer.step()
        # loss statistics
        # if (counter % print_every == 0):
        print("Starting validation:", loss)
        with torch.no_grad():
            val_losses = []
            # net.eval()
            for inputs, labels in valid_loader:
                print("Validation batch")
                inputs = inputs.type(torch.LongTensor)
                # if (inputs.shape[0], inputs.shape[1]) != (batch_size, max_len):
                #     print('Validation - Input Shape Issue:', inputs.shape)
                #     continue
                output = net(inputs)
                # print(val_losses)
                val_loss = criterion(output.squeeze(), labels)
                val_losses.append(val_loss.item())
                break
            print('up to here')
        # net.train()
        print("Epoch: {}/{}...".format(epoch + 1, epochs),
              "Step: {}...".format(counter),
              "Loss: {:.6f}...".format(loss.item()),
              "Val Loss: {:.6f}".format(np.mean(val_losses)))
Iteration 0
Backward pass!
Starting validation: tensor(6.8440, grad_fn=<NllLossBackward>)
Validation batch
up to here
Epoch: 1/5... Step: 1... Loss: 6.843983... Val Loss: 6.819246
Backward pass!
Starting validation: tensor(6.8132, grad_fn=<NllLossBackward>)
Validation batch
up to here
Epoch: 1/5... Step: 2... Loss: 6.813174... Val Loss: 6.752560
Backward pass!
Starting validation: tensor(6.7733, grad_fn=<NllLossBackward>)
Validation batch
up to here
Epoch: 1/5... Step: 3... Loss: 6.773276... Val Loss: 6.727256
...
Epoch: 1/5... Step: 90... Loss: 3.563653... Val Loss: 3.582911
Backward pass!
Starting validation: tensor(3.1979, grad_fn=<NllLossBackward>)
Validation batch
up to here
Epoch: 1/5... Step: 91... Loss: 3.197895... Val Loss: 4.469417
Backward pass!
Starting validation: tensor(3.1733, grad_fn=<NllLossBackward>)
Validation batch
Training takes too much time, so the run was stopped here!!!
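The run above was stopped early. If training were carried to completion, test-set accuracy could be measured along these lines (a sketch of mine, assuming the partial-batch fix noted after the model definition, or DataLoaders built with drop_last=True):

# Sketch: evaluating on the held-out test set after training finishes.
net.eval()  # switch dropout off for evaluation
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs = inputs.type(torch.LongTensor)
        output = net(inputs)                 # log-probabilities, [batch, output_size]
        preds = torch.argmax(output, dim=1)  # most probable CWE id per record
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(F"Test accuracy: {100 * correct / total:.2f}%")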