This article walks through how to adjust an optimizer's learning rate in PyTorch, with working code for each step.
A common setup uses SGD as the base optimizer and decays the learning rate by a factor of 10 each time the epoch count passes a milestone:
import numpy as np
import torch

step = [10, 20, 30, 40]   # milestone epochs
base_lr = 1e-4
# `model` is assumed to be defined elsewhere
sgd_opt = torch.optim.SGD(model.parameters(), lr=base_lr, nesterov=True, momentum=0.9)

def adjust_lr(epoch):
    # Multiply the base lr by 0.1 for every milestone the current epoch has passed
    lr = base_lr * (0.1 ** np.sum(epoch >= np.array(step)))
    for params_group in sgd_opt.param_groups:
        params_group['lr'] = lr
    return lr
Then simply call this function at the top of each training epoch:
for epoch in range(60):
    model.train()
    adjust_lr(epoch)
    for ind, each in enumerate(train_loader):
        mat, label = each
        ...
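PyTorch also ships a scheduler that produces this exact milestone schedule, so the manual loop over param_groups can be replaced; a minimal sketch using torch.optim.lr_scheduler.MultiStepLR (the Linear model here is a hypothetical stand-in):

import torch
import torch.nn as nn

model = nn.Linear(10, 2)   # hypothetical stand-in model
sgd_opt = torch.optim.SGD(model.parameters(), lr=1e-4, nesterov=True, momentum=0.9)
# Multiplies the lr by gamma=0.1 each time training passes a milestone epoch,
# matching the manual adjust_lr above
scheduler = torch.optim.lr_scheduler.MultiStepLR(sgd_opt, milestones=[10, 20, 30, 40], gamma=0.1)

for epoch in range(60):
    # ... one epoch of training ...
    scheduler.step()   # advance the schedule once per epoch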
Bonus: extracting keywords from car-review text with a Bi-LSTM in PyTorch
Required modules and the overall Bi-LSTM model:
import torch
import pandas as pd
import numpy as np
from tensorflow import keras
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import gensim
from sklearn.model_selection import train_test_split

class word_extract(nn.Module):
    def __init__(self, d_model, embedding_matrix):
        super(word_extract, self).__init__()
        self.d_model = d_model
        # Frozen pretrained embeddings copied from the word2vec matrix
        self.embedding = nn.Embedding(num_embeddings=len(embedding_matrix), embedding_dim=200)
        self.embedding.weight.data.copy_(embedding_matrix)
        self.embedding.weight.requires_grad = False
        # Two stacked bidirectional LSTM layers
        self.lstm1 = nn.LSTM(input_size=200, hidden_size=50, bidirectional=True)
        self.lstm2 = nn.LSTM(input_size=2 * self.lstm1.hidden_size, hidden_size=50, bidirectional=True)
        # Per-token classifier over the 4 tag classes
        self.linear = nn.Linear(2 * self.lstm2.hidden_size, 4)

    def forward(self, x):
        w_x = self.embedding(x)
        first_x, (first_h_x, first_c_x) = self.lstm1(w_x)
        second_x, (second_h_x, second_c_x) = self.lstm2(first_x)
        output_x = self.linear(second_x)
        return output_x
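Note that nn.LSTM defaults to batch_first=False and therefore reads its input as (seq_len, batch); since the DataLoader below yields batches shaped (batch, seq_len), you may want to pass batch_first=True to both LSTM layers or transpose the input. A quick hypothetical shape check with a dummy 10-word vocabulary:

import torch

dummy_matrix = torch.zeros(11, 200)        # hypothetical: 10 words + 1 padding row
net = word_extract(d_model=200, embedding_matrix=dummy_matrix)

tokens = torch.randint(0, 11, (60, 5))     # (seq_len=60, batch=5) word indices
scores = net(tokens)
print(scores.shape)                        # torch.Size([60, 5, 4]): 4 tag scores per token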
Converting text into numeric form:
def trans_num(word2idx, text):
    text_list = []
    for i in text:
        # Strip line endings and split each sentence into space-separated tokens
        s = i.rstrip().replace('\r', '').replace('\n', '').split(' ')
        # Map each token to its index, falling back to the padding index for unknown words
        numtext = [word2idx[j] if j in word2idx.keys() else word2idx['_PAD'] for j in s]
        text_list.append(numtext)
    return text_list
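A toy run with a hypothetical three-word vocabulary shows the fallback: out-of-vocabulary tokens map to the _PAD index.

word2idx = {'_PAD': 0, '空間': 1, '很大': 2}    # hypothetical toy vocabulary
print(trans_num(word2idx, ['空間 很大 超讚']))  # [[1, 2, 0]] -- '超讚' is unknown, so it maps to _PAD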
Converting the gensim word-vector model into a matrix that can be loaded into the LSTM:
def establish_word2vec_matrix(model):
    # Maps numeric indices to the data that will be fed into the model
    word2idx = {"_PAD": 0}  # the [word: index] dict used later to tokenize the corpus
    num2idx = {0: "_PAD"}
    vocab_list = [(k, model.wv[k]) for k, v in model.wv.vocab.items()]

    # Matrix holding every vector in the word2vec model; note the extra row at
    # index 0, left all-zero and reserved for padding
    embeddings_matrix = np.zeros((len(model.wv.vocab.items()) + 1, model.vector_size))

    for i in range(len(vocab_list)):
        word = vocab_list[i][0]
        word2idx[word] = i + 1
        num2idx[i + 1] = word
        embeddings_matrix[i + 1] = vocab_list[i][1]
    embeddings_matrix = torch.Tensor(embeddings_matrix)
    return embeddings_matrix, word2idx, num2idx
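One caveat: model.wv.vocab is the gensim 3.x API; gensim 4.x replaced it with model.wv.key_to_index, so this function needs the older gensim. A usage sketch, assuming the Word2Vec model saved at ./word2vec_model:

import gensim

w2v = gensim.models.Word2Vec.load('./word2vec_model')
matrix, word2idx, num2idx = establish_word2vec_matrix(w2v)
print(matrix.shape)   # (vocab_size + 1, vector_size); row 0 is the all-zero padding row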
The training loop:
def train(model, epoch, learning_rate, batch_size, x, y, val_x, val_y):
    optimizor = optim.Adam(model.parameters(), lr=learning_rate)
    data = TensorDataset(x, y)
    data = DataLoader(data, batch_size=batch_size)
    for i in range(epoch):
        for j, (per_x, per_y) in enumerate(data):
            output_y = model(per_x)
            # Flatten the (..., classes) output to (N, classes) for cross-entropy
            loss = F.cross_entropy(output_y.view(-1, output_y.size(2)), per_y.view(-1))
            optimizor.zero_grad()
            loss.backward()
            optimizor.step()
            # Token-level training accuracy for this batch
            arg_y = output_y.argmax(dim=2)
            fit_correct = (arg_y == per_y).sum()
            fit_acc = fit_correct.item() / (per_y.size(0) * per_y.size(1))
            print('##################################')
            print('Epoch {} batch {} training loss: {}'.format(i + 1, j + 1, loss), end=' ')
            print('Epoch {} batch {} training accuracy: {}'.format(i + 1, j + 1, fit_acc))
            # Evaluate on the full validation set after every batch
            val_output_y = model(val_x)
            val_loss = F.cross_entropy(val_output_y.view(-1, val_output_y.size(2)), val_y.view(-1))
            arg_val_y = val_output_y.argmax(dim=2)
            val_correct = (arg_val_y == val_y).sum()
            val_acc = val_correct.item() / (val_y.size(0) * val_y.size(1))
            print('Epoch {} batch {} validation loss: {}'.format(i + 1, j + 1, val_loss), end=' ')
            print('Epoch {} batch {} validation accuracy: {}'.format(i + 1, j + 1, val_acc))
    torch.save(model, './extract_model.pkl')  # save the model
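Since the labels are padded with class 3 (see the pad_sequences call below), the loss and accuracy above also count padding positions. A possible refinement, not in the original code: F.cross_entropy accepts an ignore_index argument that excludes those positions from the loss.

# Sketch: exclude the padding label from the loss
loss = F.cross_entropy(
    output_y.view(-1, output_y.size(2)),
    per_y.view(-1),
    ignore_index=3,   # assumes class 3 is reserved for padding
)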
The main script:
if __name__ == '__main__':
    # Build the word-vector matrix from the saved word2vec model
    word2vec = gensim.models.Word2Vec.load('./word2vec_model')
    embedding_matrix, word2idx, num2idx = establish_word2vec_matrix(word2vec)

    train_data = pd.read_csv('./數據.csv')
    x = list(train_data['文本'])
    # Convert the text from characters to indices; adapt trans_num to your own dataset
    x = trans_num(word2idx, x)
    # x must first be padded so every sentence has the same length; positions beyond
    # the sentence are filled with 0, and the padding token forms its own class.
    # The padded x is a fixed-length list of indices, e.g. [50, 123, 1850, 21, 199, 0, 0, ...]
    # and y looks like [2, 0, 1, 0, 0, 1, 3, 3, 3, 3, 3, ...].
    # Write your own padding code if needed; the following targets my dataset.
    x = keras.preprocessing.sequence.pad_sequences(
        x, maxlen=60, value=0, padding='post',
    )
    y = list(train_data['BIO數值'])
    y_text = []
    for i in y:
        s = i.rstrip().split(' ')
        numtext = [int(j) for j in s]
        y_text.append(numtext)
    y = y_text
    y = keras.preprocessing.sequence.pad_sequences(
        y, maxlen=60, value=3, padding='post',
    )
    # Split into training and validation sets
    fit_x, val_x, fit_y, val_y = train_test_split(x, y, train_size=0.8, test_size=0.2)
    fit_x = torch.LongTensor(fit_x)
    fit_y = torch.LongTensor(fit_y)
    val_x = torch.LongTensor(val_x)
    val_y = torch.LongTensor(val_y)
    # Build and train the model; learning rate, batch size, and epochs are all adjustable
    w_extract = word_extract(d_model=200, embedding_matrix=embedding_matrix)
    train(model=w_extract, epoch=5, learning_rate=0.001, batch_size=50,
          x=fit_x, y=fit_y, val_x=val_x, val_y=val_y)
    w_extract = torch.load('./extract_model.pkl')   # load the saved model
    pred_val_y = w_extract(val_x).argmax(dim=2)
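To read the predictions back out, the word indices can be mapped through num2idx and the tag ids through a tag table; a hypothetical sketch for the first validation sentence (the tag names are placeholders, so adjust them to your BIO scheme):

idx2tag = {0: 'O', 1: 'B', 2: 'I', 3: 'PAD'}   # hypothetical tag names

for word_idx, tag_idx in zip(val_x[0].tolist(), pred_val_y[0].tolist()):
    if word_idx == 0:          # stop at the padding index
        break
    print(num2idx[word_idx], idx2tag[tag_idx])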
That covers the methods for adjusting an optimizer's learning rate in PyTorch; if you have run into a similar question, the analysis above should help you work through it.