Introduction
This post uses PyTorch's built-in Transformer layers to do machine translation from German to English. If you want to build the model from scratch instead, see "PyTorch从零开始实现Transformer" (Implementing a Transformer from Scratch in PyTorch) for a deeper understanding of the architecture.
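As a quick orientation (this sketch is mine, not from the referenced code): nn.Transformer works on sequence-first tensors of shape (seq_len, batch, d_model), which is why the script below builds its embeddings in that layout. The shapes and hyperparameters here are illustrative only:

import torch
import torch.nn as nn

# nn.Transformer consumes already-embedded sequences in (seq_len, batch, d_model) layout
model = nn.Transformer(d_model=512, nhead=8, num_encoder_layers=3, num_decoder_layers=3)
src = torch.rand(10, 32, 512)  # (src_len, batch, d_model)
tgt = torch.rand(20, 32, 512)  # (trg_len, batch, d_model)
out = model(src, tgt)          # (trg_len, batch, d_model)
print(out.shape)               # torch.Size([20, 32, 512])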
Dataset
Multi30k
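Multi30k is a German-English parallel corpus of roughly 30k image-caption sentence pairs. A minimal, illustrative way to peek at it with the legacy torchtext API used below (whitespace tokenization here is just for the peek; the real script uses spacy):

from torchtext.datasets import Multi30k
from torchtext.data import Field

german = Field(tokenize=str.split, lower=True)
english = Field(tokenize=str.split, lower=True)

# Load the German-English pairs and inspect one training example
train_data, valid_data, test_data = Multi30k.splits(
    exts=(".de", ".en"), fields=(german, english)
)
print(len(train_data), len(valid_data), len(test_data))  # expect roughly 29000 1014 1000
print(vars(train_data.examples[0]))  # {'src': [german tokens], 'trg': [english tokens]}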
Environment Requirements
The experiment uses torch, torchtext, and spacy, where spacy handles tokenization. spacy also requires downloading language models into your (virtual) environment so it can tokenize:
# To install spacy languages do:
python -m spacy download en_core_web_sm
python -m spacy download de_core_news_sm
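To see what the tokenizer produces, here is a quick, illustrative check (the sentence is arbitrary, not from the post):

import spacy

# spacy splits punctuation into its own tokens
spacy_eng = spacy.load("en_core_web_sm")
print([tok.text for tok in spacy_eng.tokenizer("A horse walks under a bridge.")])
# ['A', 'horse', 'walks', 'under', 'a', 'bridge', '.']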
Experiment Code

The code is taken from the GitHub repository linked in the references below.
The transformer_translation.py file:
# Bleu score 32.02
import torch
import torch.nn as nn
import torch.optim as optim
import spacy
from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint
from torch.utils.tensorboard import SummaryWriter
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
"""
To install spacy languages do:
python -m spacy download en_core_web_sm
python -m spacy download de_core_news_sm
"""
spacy_ger = spacy.load("de_core_news_sm")
spacy_eng = spacy.load("en_core_web_sm")


# Tokenize German text into a list of token strings
def tokenize_ger(text):
    return [tok.text for tok in spacy_ger.tokenizer(text)]


# Tokenize English text into a list of token strings
def tokenize_eng(text):
    return [tok.text for tok in spacy_eng.tokenizer(text)]


german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>")
english = Field(tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>")

train_data, valid_data, test_data = Multi30k.splits(
    exts=(".de", ".en"), fields=(german, english)
)

german.build_vocab(train_data, max_size=10000, min_freq=2)
english.build_vocab(train_data, max_size=10000, min_freq=2)


class Transformer(nn.Module):
    def __init__(
        self,
        embedding_size,
        src_vocab_size,
        trg_vocab_size,
        src_pad_idx,
        num_heads,
        num_encoder_layers,
        num_decoder_layers,
        forward_expansion,
        dropout,
        max_len,
        device,
    ):
        super(Transformer, self).__init__()
        self.src_word_embedding = nn.Embedding(src_vocab_size, embedding_size)
        self.src_position_embedding = nn.Embedding(max_len, embedding_size)
        self.trg_word_embedding = nn.Embedding(trg_vocab_size, embedding_size)
        self.trg_position_embedding = nn.Embedding(max_len, embedding_size)

        self.device = device
        self.transformer = nn.Transformer(
            embedding_size,
            num_heads,
            num_encoder_layers,
            num_decoder_layers,
            forward_expansion,  # note: this positional argument is nn.Transformer's dim_feedforward
            dropout,
        )
        self.fc_out = nn.Linear(embedding_size, trg_vocab_size)
        self.dropout = nn.Dropout(dropout)
        self.src_pad_idx = src_pad_idx

    def make_src_mask(self, src):
        # True where the source token is padding; shape (N, src_len)
        src_mask = src.transpose(0, 1) == self.src_pad_idx
        return src_mask.to(self.device)

    def forward(self, src, trg):
        src_seq_length, N = src.shape
        trg_seq_length, N = trg.shape

        src_positions = (
            torch.arange(0, src_seq_length)
            .unsqueeze(1)
            .expand(src_seq_length, N)
            .to(self.device)
        )
        trg_positions = (
            torch.arange(0, trg_seq_length)
            .unsqueeze(1)
            .expand(trg_seq_length, N)
            .to(self.device)
        )

        # Learned positional embeddings are added to the word embeddings
        embed_src = self.dropout(
            self.src_word_embedding(src) + self.src_position_embedding(src_positions)
        )
        embed_trg = self.dropout(
            self.trg_word_embedding(trg) + self.trg_position_embedding(trg_positions)
        )

        src_padding_mask = self.make_src_mask(src)
        trg_mask = self.transformer.generate_square_subsequent_mask(trg_seq_length).to(
            self.device
        )

        out = self.transformer(
            embed_src,
            embed_trg,
            src_key_padding_mask=src_padding_mask,
            tgt_mask=trg_mask,
        )
        out = self.fc_out(out)
        return out


# We're ready to define everything we need for training our Seq2Seq model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

load_model = False
save_model = True

# Training hyperparameters
num_epochs = 10
learning_rate = 3e-4
batch_size = 32

# Model hyperparameters
src_vocab_size = len(german.vocab)
trg_vocab_size = len(english.vocab)
embedding_size = 512
num_heads = 8
num_encoder_layers = 3
num_decoder_layers = 3
dropout = 0.10
max_len = 100
forward_expansion = 4
src_pad_idx = english.vocab.stoi["<pad>"]  # "<pad>" has the same index in both vocabs

# Tensorboard to get nice loss plot
writer = SummaryWriter("runs/loss_plot")
step = 0

train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=batch_size,
    sort_within_batch=True,
    sort_key=lambda x: len(x.src),
    device=device,
)

model = Transformer(
    embedding_size,
    src_vocab_size,
    trg_vocab_size,
    src_pad_idx,
    num_heads,
    num_encoder_layers,
    num_decoder_layers,
    forward_expansion,
    dropout,
    max_len,
    device,
).to(device)

optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.1, patience=10, verbose=True
)

pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)

if load_model:
    load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)

# English: "a horse is walking under a bridge next to a boat."
sentence = "ein pferd geht unter einer brücke neben einem boot."

for epoch in range(num_epochs):
    print(f"[Epoch {epoch} / {num_epochs}]")

    if save_model:
        checkpoint = {
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

    model.eval()
    translated_sentence = translate_sentence(
        model, sentence, german, english, device, max_length=50
    )
    print(f"Translated example sentence: \n {translated_sentence}")
    model.train()

    losses = []
    for batch_idx, batch in enumerate(train_iterator):
        # Get inputs and targets and move them to the device
        inp_data = batch.src.to(device)
        target = batch.trg.to(device)

        # Forward prop: feed the target shifted right (teacher forcing)
        output = model(inp_data, target[:-1, :])

        # Output has shape (trg_len, batch_size, output_dim), but CrossEntropyLoss
        # expects (N, num_classes) inputs with targets of shape (N), so flatten the
        # sequence and batch dimensions; also drop the start token from the target
        output = output.reshape(-1, output.shape[2])
        target = target[1:].reshape(-1)

        optimizer.zero_grad()
        loss = criterion(output, target)
        losses.append(loss.item())

        # Back prop
        loss.backward()

        # Clip to avoid exploding gradients, keeping them in a healthy range
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)

        # Gradient descent step
        optimizer.step()

        # Plot to tensorboard
        writer.add_scalar("Training loss", loss, global_step=step)
        step += 1

    mean_loss = sum(losses) / len(losses)
    scheduler.step(mean_loss)

# Running on the entire test set takes a while
score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score * 100:.2f}")
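Two details of the training loop are worth unpacking. The model is trained with teacher forcing: the decoder input is target[:-1] and the loss is computed against target[1:], so the network learns to predict the next token. The tgt_mask makes this causal inside the attention layers. As an illustrative check (not part of the original script), the mask produced by generate_square_subsequent_mask looks like this:

import torch.nn as nn

# Causal (subsequent) mask for a target of length 4: -inf blocks attention to
# future positions, 0 allows attention to the current and earlier positions.
mask = nn.Transformer().generate_square_subsequent_mask(4)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])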
The utils.py file:
import torch
import spacy
from torchtext.data.metrics import bleu_score
import sys


def translate_sentence(model, sentence, german, english, device, max_length=50):
    # Load German tokenizer
    spacy_ger = spacy.load("de_core_news_sm")

    # Create tokens using spacy, lower-cased (which is what our vocab is)
    if type(sentence) == str:
        tokens = [token.text.lower() for token in spacy_ger(sentence)]
    else:
        tokens = [token.lower() for token in sentence]

    # Add <sos> and <eos> at the beginning and end respectively
    tokens.insert(0, german.init_token)
    tokens.append(german.eos_token)

    # Convert each German token to an index
    text_to_indices = [german.vocab.stoi[token] for token in tokens]

    # Convert to a tensor of shape (src_len, 1)
    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)

    # Greedy decoding: feed the tokens generated so far, take the argmax each step
    outputs = [english.vocab.stoi["<sos>"]]
    for i in range(max_length):
        trg_tensor = torch.LongTensor(outputs).unsqueeze(1).to(device)

        with torch.no_grad():
            output = model(sentence_tensor, trg_tensor)

        best_guess = output.argmax(2)[-1, :].item()
        outputs.append(best_guess)

        if best_guess == english.vocab.stoi["<eos>"]:
            break

    translated_sentence = [english.vocab.itos[idx] for idx in outputs]
    # Remove start token
    return translated_sentence[1:]


def bleu(data, model, german, english, device):
    targets = []
    outputs = []

    for example in data:
        src = vars(example)["src"]
        trg = vars(example)["trg"]

        prediction = translate_sentence(model, src, german, english, device)
        prediction = prediction[:-1]  # remove <eos> token

        targets.append([trg])
        outputs.append(prediction)

    return bleu_score(outputs, targets)


def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
    print("=> Saving checkpoint")
    torch.save(state, filename)


def load_checkpoint(checkpoint, model, optimizer):
    print("=> Loading checkpoint")
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
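The bleu function above feeds tokenized hypotheses and references to torchtext's corpus-level bleu_score. As a toy, illustrative check of the expected argument shapes (not from the original post; each hypothesis gets a list of references, since a sentence may have several valid translations):

from torchtext.data.metrics import bleu_score

# candidate_corpus: list of tokenized hypotheses;
# references_corpus: list of lists of tokenized references
candidate = [["a", "horse", "walks", "under", "a", "bridge", "."]]
references = [[["a", "horse", "walks", "under", "a", "bridge", "."]]]
print(bleu_score(candidate, references))  # 1.0 for an exact match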
Experiment Results
Translating the following German sentence:

sentence = "ein pferd geht unter einer brücke neben einem boot."

gives the result:

['a', 'horse', 'walks', 'underneath', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']

The BLEU score is 31.73.
After training for 10 epochs, the output looks like this:

# Result
=> Loading checkpoint
[Epoch 0 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'under', 'a', 'boat', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 1 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'underneath', 'a', 'bridge', 'beside', 'a', 'boat', '.', '<eos>']
[Epoch 2 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'is', 'walking', 'beside', 'a', 'boat', 'under', 'a', 'bridge', '.', '<eos>']
[Epoch 3 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 4 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 5 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'beside', 'a', 'boat', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 6 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'is', 'walking', 'underneath', 'a', 'bridge', 'under', 'a', 'boat', '.', '<eos>']
[Epoch 7 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 8 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'beneath', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']
[Epoch 9 / 10]
=> Saving checkpoint
Translated example sentence:
 ['a', 'horse', 'walks', 'underneath', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']
Bleu score 31.73

References
[1] https://blog.csdn.net/weixin_43632501/article/details/98731800
[2] https://www.youtube.com/watch?v=M6adRGJe5cQ
[3] https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/more_advanced/seq2seq_transformer/seq2seq_transformer.py
[4] https://blog.csdn.net/g11d111/article/details/100103208