Commit 4c0ed80c authored by Konstantin Julius Lotzgeselle
parents c5d7a0a9 e9945170
%% Cell type:code id:initial_id tags:
``` python
import torch
import torch.nn as nn
import torch.nn.functional as F
import seaborn as sns
from matplotlib import pyplot as plt
from pathlib import Path
```
%% Cell type:markdown id:d8d7f32150682efd tags:
## 0. Prepare the data
%% Cell type:code id:f7c39c06ce3a14db tags:
``` python
def load_data() -> tuple[list[str], list[str]]:
    with open("data/training-data/eup/europarl-v7.de-en.de", "r", encoding="utf8") as f:
        data_de = [line.rstrip("\n") for line in f]
    with open("data/training-data/eup/europarl-v7.de-en.en", "r", encoding="utf8") as f:
        data_en = [line.rstrip("\n") for line in f]
    ltd = set()  # indices of lines to delete later
    for i in range(min(len(data_de), len(data_en))):
        # If a line is empty in one file, merge its counterpart into the next line
        if data_de[i] == "":
            data_en[i+1] = data_en[i] + " " + data_en[i+1]
            ltd.add(i)
        if data_en[i] == "":
            data_de[i+1] = data_de[i] + " " + data_de[i+1]
            ltd.add(i)
        # Remove pairs whose word counts differ by more than 40%
        if abs(count_words(data_de[i]) - count_words(data_en[i])) / (max(count_words(data_de[i]), count_words(data_en[i])) + 1) > 0.4:
            ltd.add(i)
        # Remove pairs shorter than 3 words or longer than 25 words
        if max(count_words(data_de[i]), count_words(data_en[i])) < 3 or max(count_words(data_de[i]), count_words(data_en[i])) > 25:
            ltd.add(i)
    data_de = [l for i, l in enumerate(data_de) if i not in ltd]
    data_en = [l for i, l in enumerate(data_en) if i not in ltd]
    print(len(data_de), len(data_en))
    # Print 3 random sentence pairs
    ix = torch.randint(low=0, high=len(data_de), size=(3, ))
    for i in ix:
        print(f"Zeile: {i}\nDeutsch: {data_de[i]}\nEnglish: {data_en[i]}\n")
    print(f"\nNumber of lines: {len(data_de), len(data_en)}")
    return data_de, data_en


def count_words(string: str) -> int:
    return len(string.split())


source, target = load_data()
```
%% Output
1046809 1046809
Zeile: 961261
Deutsch: Es wird in diesem Abschnitt allerdings nicht erwähnt, daß der Finanzkontrolleur vor dem Ereignis von dem Gedanken, die automatische Mittelübertragung anzuwenden, abriet.
English: But the paragraph does not mention that the financial controller had, before the event, advised against the idea of using the automatic carry-over procedure.
Zeile: 780740
Deutsch: Ich habe gegen die Autorisierung zur Entlastung des Gesamthaushaltsplans der Europäischen Arzneimittel-Agentur gestimmt.
English: I voted against the authorisation to discharge the 2009 general budget of the European Medicines Agency.
Zeile: 343591
Deutsch: Wir haben diesen Anspruch geteilt, aber an diesem Anspruch müssen wir den Rat dann natürlich auch messen.
English: While we had a share in making this commitment, it is also, of course, by it that we have to judge the Council.
Number of lines: (1046809, 1046809)
%% Cell type:markdown id:f2beddcc4122495a tags:
## 1. Text tokenization
%% Cell type:code id:d8ccbafa97fba573 tags:
``` python
# set up the tokenizer
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.processors import TemplateProcessing
# setting the unknown token (e.g. for emojis)
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# adding special tokens
# [UNK] : unknown word/token
# [CLS] : starting token (new sentence sequence)
# [SEP] : separator for chaining multiple sentences
# [PAD] : padding needed for encoder input
# [MASK] : masking token (used for masked-language-model training; not needed here)
trainer = BpeTrainer(vocab_size=50000, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# set up the pre-tokenizer -> this ensures that no token spans more than one word
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
```
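As a quick sanity check (a sketch added here, not part of the committed notebook; the example sentence is arbitrary), the `Whitespace` pre-tokenizer can be inspected directly. It splits on whitespace and punctuation, so no merged BPE token can ever span more than one word:
``` python
# Sketch: inspect how the Whitespace pre-tokenizer splits a sentence
sample = "Hello, how are you?"
print(tokenizer.pre_tokenizer.pre_tokenize_str(sample))
# -> [('Hello', (0, 5)), (',', (5, 6)), ('how', (7, 10)), ('are', (11, 14)), ('you', (15, 18)), ('?', (18, 19))]
```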
%% Cell type:code id:55cbac65a50a0199 tags:
``` python
tokenizer.train(["data/training-data/eup/europarl-v7.de-en.de", "data/training-data/eup/europarl-v7.de-en.en"], trainer)
# configure post processing
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
    ],
)
vocab_size = tokenizer.get_vocab_size()
```
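To verify the template post-processing (again a sketch with an arbitrary sentence; the exact ids depend on the trained vocabulary), a single `encode` call should now return a sequence wrapped in `[CLS] … [SEP]`:
``` python
# Sketch: the post-processor should wrap every encoded sequence in [CLS] ... [SEP]
enc = tokenizer.encode("The European Parliament meets in Strasbourg.")
print(enc.tokens)   # starts with '[CLS]' and ends with '[SEP]'
print(enc.ids)      # ids depend on the trained BPE vocabulary
print(vocab_size)   # trained vocabulary size (at most 50000)
```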
%% Cell type:markdown id:9c0f853775a802ec tags:
## 2. Prepare the training data
%% Cell type:code id:2e4dc87ce98b6cdd tags:
``` python
# Prepare training batch
def training_data(batch_size: int = 10, max_tokens: int = 50) -> tuple[torch.Tensor, torch.Tensor]:
    x_training_data = []
    y_training_data = []
    # select random sentence pairs (x = English input, y = German target)
    batch_indices = torch.randint(0, len(source), (batch_size, ))
    for idx in batch_indices:
        x_training_data.append(target[idx])
        y_training_data.append(source[idx])
    # tokenize the data
    # encoder inputs: pad to the longest sequence in the batch
    tokenizer.enable_padding(pad_id=3)
    x_training_data = tokenizer.encode_batch(x_training_data)
    # decoder targets: pad to a fixed length of max_tokens
    tokenizer.enable_padding(pad_id=3, length=max_tokens)
    y_training_data = tokenizer.encode_batch(y_training_data)
    # extract the ids of every sequence
    for i in range(len(batch_indices)):
        x_training_data[i] = x_training_data[i].ids
        y_training_data[i] = y_training_data[i].ids
    # convert x data to a tensor
    x_training_data = torch.tensor(x_training_data)
    # convert y data to a tensor (one-hot encoding is not needed for CrossEntropyLoss)
    #y_training_data = F.one_hot(torch.tensor(y_training_data), num_classes=vocab_size)
    y_training_data = torch.tensor(y_training_data)
    return x_training_data, y_training_data

print(training_data())
```
%% Output
(tensor([[ 1, 556, 668, 3111, 804, 360, 2437, 346, 1103, 14738,
1292, 363, 1340, 16, 7514, 16, 1860, 346, 1552, 477,
346, 565, 363, 346, 821, 18, 2, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 556, 18578, 14776, 1583, 1557, 37474, 343, 4710, 16,
1313, 1478, 11, 24540, 363, 42482, 440, 7385, 19072, 18,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 721, 342, 11, 3808, 11, 360, 68, 610, 17,
1099, 16848, 708, 11, 414, 11, 360, 346, 4844, 362,
9022, 1820, 346, 7131, 362, 739, 18, 2, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 37, 7518, 362, 585, 4050, 342, 12638, 16, 708,
344, 668, 359, 1557, 764, 13143, 1251, 342, 9825, 531,
346, 1743, 1881, 18, 2, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 3469, 4719, 360, 1266, 20825, 386, 1722, 26460, 18,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 6384, 16, 344, 342, 68, 1411, 362, 7261, 5725,
11, 2505, 1192, 18, 2714, 689, 470, 359, 15790, 531,
346, 2138, 362, 346, 2745, 1350, 18, 2, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 1139, 16, 1266, 16, 765, 440, 6275, 386, 694,
2944, 2527, 360, 632, 360, 993, 363, 360, 3216, 2944,
335, 346, 694, 1350, 18, 2, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 45, 7581, 20992, 5098, 352, 4647, 493, 16, 68,
1379, 17, 5073, 7946, 508, 17, 2003, 362, 6128, 2985,
16, 7932, 1266, 386, 7342, 343, 346, 11092, 16, 68,
1379, 17, 5073, 16, 963, 7946, 32246, 18, 2],
[ 1, 502, 1098, 771, 342, 401, 343, 346, 1124, 980,
380, 440, 8035, 477, 346, 821, 936, 340, 2536, 335,
3583, 18, 2, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 1380, 3091, 440, 1233, 3516, 386, 346, 4635, 362,
871, 3401, 10302, 10131, 16, 708, 346, 425, 17, 3152,
520, 1310, 4410, 546, 38980, 346, 1403, 1981, 18, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3]]), tensor([[ 1, 596, 4569, 410, 1054, 852, 2409, 6581, 369, 471,
352, 619, 369, 469, 897, 1359, 2130, 5859, 1996, 8030,
18, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 596, 1071, 1330, 782, 1091, 383, 1979, 8592, 340,
372, 4502, 436, 16, 427, 40257, 369, 38283, 340, 17022,
1208, 383, 10522, 18, 5282, 2634, 2989, 4684, 18, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 7907, 16, 25310, 17914, 2260, 16, 864, 1246, 11616,
547, 6268, 1733, 5940, 441, 1362, 18, 2, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 2716, 6442, 352, 585, 17, 3772, 415, 12740, 16,
1329, 864, 4237, 33811, 563, 471, 469, 5415, 5647, 6791,
18, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 571, 4082, 1302, 1295, 20825, 435, 1791, 21859, 18,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 5711, 339, 1402, 427, 372, 2016, 352, 5501, 352,
1208, 16, 367, 474, 372, 4473, 441, 7820, 12244, 2578,
18, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 720, 2544, 597, 16, 569, 1295, 16, 475, 367,
2197, 43674, 33109, 824, 369, 435, 2128, 436, 372, 2197,
11617, 4292, 2819, 18, 2, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 571, 14707, 20992, 5098, 352, 4647, 493, 16, 636,
15650, 19057, 9752, 9655, 16, 11092, 17, 6991, 24185, 435,
5881, 16, 364, 4035, 339, 16, 6870, 46006, 352, 13538,
18, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 825, 1636, 2045, 1565, 2574, 16, 655, 416, 611,
3978, 1204, 471, 469, 897, 510, 367, 6732, 352, 1388,
12371, 18, 2, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 1, 19994, 1212, 339, 3544, 6590, 435, 410, 49627, 352,
14490, 24684, 16, 1707, 410, 425, 8099, 1034, 17870, 532,
367, 2014, 782, 15024, 18, 2, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]))
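One detail of `training_data` worth spelling out (the check below is a sketch, not in the original notebook): `enable_padding(pad_id=3)` pads the encoder inputs only up to the longest sequence in the batch, whereas `enable_padding(pad_id=3, length=max_tokens)` pads the decoder targets to a fixed length, which is what the decoder's fixed-length generation loop expects.
``` python
# Sketch: compare the shapes produced by training_data()
x_batch, y_batch = training_data(batch_size=4, max_tokens=50)
print(x_batch.shape)  # (4, longest encoder sequence in this particular batch)
print(y_batch.shape)  # (4, 50) -- targets are always padded to max_tokens
```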
%% Cell type:markdown id:689e2e565cce2845 tags:
## 3. Build the sequence2sequence LSTM
%% Cell type:code id:e8d99510479108f4 tags:
``` python
embedding_dimension = 500

embedding_matrix_enc = torch.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dimension)
embedding_matrix_dec = torch.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dimension)


class Encoder(torch.nn.Module):
    def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, bidirectional: bool = False):
        super(Encoder, self).__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        # embedding matrix
        self._embedding = embedding_matrix_enc
        # LSTM layer
        self._lstm = torch.nn.LSTM(input_size=input_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   bidirectional=bidirectional,
                                   batch_first=True)
        self._dropout = torch.nn.Dropout(0.1)

    def forward(self, sequence: torch.Tensor):
        embedded_sequence = self._embedding(sequence)
        # initial hidden and cell states (batched)
        h_0 = torch.zeros(self._num_layers, embedded_sequence.size(0), self._hidden_size)
        c_0 = torch.zeros(self._num_layers, embedded_sequence.size(0), self._hidden_size)
        output, (hn, cn) = self._lstm(embedded_sequence, (h_0, c_0))
        return output, hn, cn


class Decoder(torch.nn.Module):
    def __init__(self, input_size: int, hidden_size: int, output_size: int,
                 num_layers: int = 1, bidirectional: bool = False,
                 max_tokens: int = 40):
        super(Decoder, self).__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._max_tokens = max_tokens
        # embedding matrix
        self._embedding = embedding_matrix_dec
        # LSTM layer
        self._lstm = torch.nn.LSTM(input_size=input_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   bidirectional=bidirectional,
                                   batch_first=True)
        # output layer (fully connected linear layer)
        self._out = nn.Linear(hidden_size, output_size)

    def forward(self, enc_out: torch.Tensor, hidden_state: torch.Tensor,
                cell_state: torch.Tensor, target_tensor: torch.Tensor = None):
        batch_size = enc_out.size(0)
        outputs = []
        # prepare the start token ([CLS] has id 1)
        x_in = torch.empty(batch_size, 1, dtype=torch.long).fill_(1)
        for i in range(self._max_tokens):
            out, hidden_state, cell_state = self.forward_step(x_in, hidden_state, cell_state)
            outputs.append(out)
            if target_tensor is not None:
                # Teacher forcing: feed the target as the next input
                x_in = target_tensor[:, i].unsqueeze(1)
            else:
                # Without teacher forcing: use the model's own prediction as the next input
                _, topi = out.topk(1)
                x_in = topi.squeeze(-1).detach()  # detach from history as input
        # stack the per-step predictions along the time dimension
        outputs = torch.cat(outputs, dim=1)
        # no log_softmax here: CrossEntropyLoss expects raw logits
        return outputs, hidden_state, cell_state

    def forward_step(self, x_in, hidden_state, cell_state):
        output = self._embedding(x_in)
        output = F.relu(output)
        output, (h_t, c_t) = self._lstm(output, (hidden_state, cell_state))
        output = self._out(output)
        return output, h_t, c_t
```
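A small smoke test (sketch only; the names `_enc`, `_dec` and the dummy batch are made up for illustration) helps confirm the wiring: the encoder turns a batch of token ids into hidden states, and the decoder, run without teacher forcing, emits logits of shape (batch, max_tokens, vocab_size).
``` python
# Sketch: smoke-test the encoder/decoder wiring with random token ids
_enc = Encoder(input_size=embedding_dimension, hidden_size=64)
_dec = Decoder(input_size=embedding_dimension, hidden_size=64,
               output_size=vocab_size, max_tokens=10)
dummy_ids = torch.randint(0, vocab_size, (2, 12))   # batch of 2 sequences, 12 tokens each
enc_out, h, c = _enc(dummy_ids)
logits, _, _ = _dec(enc_out, h, c)                  # greedy decoding, no teacher forcing
print(enc_out.shape, logits.shape)                  # (2, 12, 64) and (2, 10, vocab_size)
```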
%% Cell type:markdown id:535bc20b2f12f2da tags:
## 4. Train the model
%% Cell type:code id:1f8d3152359f6658 tags:
``` python
LSTM_hidden_size = 500
max_tokens_per_sequence = 70

# create encoder / decoder instances
encoder = Encoder(input_size=embedding_dimension, hidden_size=LSTM_hidden_size)
decoder = Decoder(input_size=embedding_dimension, hidden_size=LSTM_hidden_size,
                  output_size=vocab_size, max_tokens=max_tokens_per_sequence)

num_epochs = 30
optimizer = torch.optim.Adam(params=list(encoder.parameters()) + list(decoder.parameters()), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()

for i in range(1, num_epochs + 1):
    # reset gradients
    optimizer.zero_grad()
    # get training data
    x_train, y_train = training_data(batch_size=32, max_tokens=max_tokens_per_sequence)
    # make prediction
    encoder_out, encoder_h, encoder_c = encoder(x_train)
    predict, _, _ = decoder(encoder_out, encoder_h, encoder_c, y_train)
    # match dimensions of prediction & gold-label vector
    predict = predict.view(-1, predict.size(-1))
    y_train = y_train.view(-1)
    # calculate loss & propagate it backwards
    loss = criterion(predict, y_train)
    loss.backward()
    optimizer.step()
    if i % 10 == 0:
        print("---- Iteration " + str(i) + " ----")
        print("loss: " + str(loss.item()))
```
%% Output
---- Iteration 10 ----
loss: -0.045227549970149994
---- Iteration 20 ----
loss: -0.1404053419828415
---- Iteration 30 ----
loss: -0.22252100706100464
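Because the targets are padded to a fixed length, most positions in `y_train` are the [PAD] id 3, so a large share of the loss above comes from predicting padding. A common refinement, not used in this notebook, would be to mask those positions in the loss:
``` python
# Sketch (not used above): ignore [PAD] positions (id 3) when computing the loss
criterion_masked = torch.nn.CrossEntropyLoss(ignore_index=3)
```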
%% Cell type:markdown id:44f9b74f91565a4a tags:
## 5. Sample from the model
%% Cell type:code id:b95fb365f686125d tags:
``` python
test_sequence = ("Nein, hier funktioniert genau gar nichts")
test_sequence = ("Hi are you there")
test_sequence_enc = tokenizer.encode(test_sequence)
print(test_sequence_enc.ids)
print(test_sequence_enc.tokens)
test_sequence_batched = torch.tensor(test_sequence_enc.ids).view(1, -1)
encoder_out, encoder_h, encoder_c = encoder(test_sequence_batched)
predict, _, _ = decoder(encoder_out, encoder_h, encoder_c)
_, topi = predict.topk(1)
decoded_ids = topi.squeeze()
print(decoded_ids)
tokenizer.decode(decoded_ids.tolist())
```
%% Output
[1, 7458, 16, 1054, 7936, 2352, 1485, 2989, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
['[CLS]', 'H', 'i', 'are', 'you', 'there', '[SEP]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]']
tensor([ 1, 1, 1, 32460, 32460, 28617, 7251, 47089, 38628, 38628,
38628, 38495, 31040, 14290, 4593, 41094, 13045, 17103, 45127, 18564,
5320, 5320, 5320, 5320, 5320, 5320, 5320, 7784, 7784, 34640,
5320, 5320, 5320, 5320, 5320, 5320, 7784, 7784, 34640, 5320,
5320, 5320, 5320, 5320, 5320, 7784, 7784, 34640, 5320, 5320,
5320, 5320, 5320, 5320, 7784, 7784, 34640, 5320, 5320, 5320,
5320, 5320, 5320, 7784, 7784, 34640, 5320, 5320, 5320, 5320])
'Quantität Quantität drastischen committees Vermächtnis jana jana jana Aufrichtigkeit Fusionen ströme More Tellereisen Mod verschoben emaking auszubauen unch unch unch unch unch unch unch popul popul Bin unch unch unch unch unch unch popul popul Bin unch unch unch unch unch unch popul popul Bin unch unch unch unch unch unch popul popul Bin unch unch unch unch unch unch popul popul Bin unch unch unch unch'
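The greedy loop always runs for `max_tokens` steps, so the decoded string keeps going after the sentence should have ended. A small post-processing sketch (assuming [SEP] has id 2, as in the vocabulary above) truncates the prediction at the first end token:
``` python
# Sketch: cut the greedy prediction at the first [SEP] token (id 2)
ids = decoded_ids.tolist()
if 2 in ids:
    ids = ids[:ids.index(2)]
print(tokenizer.decode(ids))
```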