Update pep8 (#78)

* simplify requirements file

* style

* apply linter
Sebastian Raschka 2024-03-18 08:16:17 -05:00 committed by GitHub
parent e316cafd9f
commit 9d6da22ebb
13 changed files with 82 additions and 120 deletions

View File

@@ -25,9 +25,10 @@ def get_packages(pkgs):
                 except AttributeError:
                     try:
                         versions.append(imported.version_info)
-                    except:
+                    except AttributeError:
                         try:
-                            import importlib, importlib_metadata
+                            import importlib
+                            import importlib_metadata
                             imported = importlib.import_module(p)
                             version = importlib_metadata.version(p)
                             versions.append(version)
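A note on the two fixes above: a bare except: also swallows exceptions such as KeyboardInterrupt and SystemExit, so narrowing it to AttributeError keeps unrelated failures visible. A minimal sketch of the narrowed pattern, assuming the stdlib importlib.metadata as the fallback (the helper name is hypothetical, not from this file):

import importlib
import importlib.metadata  # stdlib since Python 3.8; the file itself uses the importlib_metadata backport

def lookup_version(pkg_name):
    # Hypothetical helper mirroring the hunk above: try the usual
    # __version__ attribute first, then fall back to package metadata.
    module = importlib.import_module(pkg_name)
    try:
        return module.__version__
    except AttributeError:
        # Only AttributeError is caught here; anything else propagates.
        return importlib.metadata.version(pkg_name)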

View File

@@ -131,7 +131,7 @@ def main(rank, world_size, num_epochs):
             loss.backward()
             optimizer.step()
 
-            ### LOGGING
+            # LOGGING
             print(f"[GPU{rank}] Epoch: {epoch+1:03d}/{num_epochs:03d}"
                   f" | Batchsize {labels.shape[0]:03d}"
                   f" | Train/Val Loss: {loss:.2f}")

View File

@@ -1,39 +1,3 @@
-"""
-Byte pair encoding utilities
-Code from https://github.com/openai/gpt-2/blob/master/src/encoder.py
-And modified code (download_vocab) from
-https://github.com/openai/gpt-2/blob/master/download_model.py
-Modified MIT License
-Software Copyright (c) 2019 OpenAI
-We don't claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
-We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-The above copyright notice and this permission notice need not be included
-with content created by the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
-OR OTHER DEALINGS IN THE SOFTWARE.
-"""
 import os
 import json
 import regex as re
@@ -41,6 +5,7 @@ import requests
 from tqdm import tqdm
 from functools import lru_cache
 
+
 @lru_cache()
 def bytes_to_unicode():
     """
@@ -63,9 +28,10 @@ def bytes_to_unicode():
     cs = [chr(n) for n in cs]
     return dict(zip(bs, cs))
 
-def get_pairs(word):
-    """Return set of symbol pairs in a word.
+def get_pairs(word):
+    """
+    Return set of symbol pairs in a word.
     Word is represented as tuple of symbols (symbols being variable-length strings).
     """
     pairs = set()
@@ -75,6 +41,7 @@ def get_pairs(word):
         prev_char = char
     return pairs
 
+
 class Encoder:
     def __init__(self, encoder, bpe_merges, errors='replace'):
         self.encoder = encoder
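For context on the get_pairs rewrite above: the function feeds the BPE merge loop by collecting every adjacent symbol pair in a word. A quick hedged example of the behavior its docstring describes (the symbol values are made up):

# Assumes get_pairs from this file; symbols can be multi-character strings.
word = ("l", "o", "w", "er")
print(get_pairs(word))
# expected: {('l', 'o'), ('o', 'w'), ('w', 'er')} -- set order varies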
@@ -85,7 +52,7 @@ class Encoder:
         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
         self.cache = {}
 
-        # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
         self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
 
     def bpe(self, token):
@@ -109,7 +76,7 @@ class Encoder:
                 try:
                     j = word.index(first, i)
                     new_word.extend(word[i:j])
                     i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
@@ -141,16 +108,14 @@ class Encoder:
         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
         return text
 
+
 def get_encoder(model_name, models_dir):
     with open(os.path.join(models_dir, model_name, 'encoder.json'), 'r') as f:
         encoder = json.load(f)
     with open(os.path.join(models_dir, model_name, 'vocab.bpe'), 'r', encoding="utf-8") as f:
         bpe_data = f.read()
     bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
-    return Encoder(
-        encoder=encoder,
-        bpe_merges=bpe_merges,
-    )
+    return Encoder(encoder=encoder, bpe_merges=bpe_merges)
 
 def download_vocab():
@@ -161,8 +126,7 @@ def download_vocab():
     subdir = subdir.replace('\\', '/')  # needed for Windows
 
     for filename in ['encoder.json', 'vocab.bpe']:
-        r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/models/117M" + "/" + filename, stream=True)
+        r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/models/117M/" + filename, stream=True)
 
         with open(os.path.join(subdir, filename), 'wb') as f:
             file_size = int(r.headers["content-length"])
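The stream=True request above pairs with chunked writes, so the vocab files are never held fully in memory. A minimal sketch of that download pattern (the function name, chunk size, and tqdm settings are illustrative assumptions, not this file's exact code):

import os
import requests
from tqdm import tqdm

def fetch_file(url, dest_path, chunk_size=1024):
    # Stream the response and report progress against Content-Length.
    r = requests.get(url, stream=True)
    file_size = int(r.headers["content-length"])
    with open(dest_path, 'wb') as f:
        with tqdm(total=file_size, unit='b', unit_scale=True,
                  desc=os.path.basename(dest_path)) as pbar:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                pbar.update(len(chunk))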

View File

@@ -44,7 +44,6 @@ class MultiHeadAttentionWrapper(nn.Module):
         return self.out_proj(context_vec)
 
 
 class MultiHeadAttention(nn.Module):
-
     def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
         super().__init__()
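Going by the __init__ signature shown above, a hedged usage sketch (all dimension values are made up; in the usual implementation d_out must be divisible by num_heads):

import torch

# Assumes MultiHeadAttention from this module.
mha = MultiHeadAttention(d_in=256, d_out=256, block_size=128,
                         dropout=0.1, num_heads=4, qkv_bias=False)
x = torch.randn(8, 128, 256)  # (batch_size, num_tokens, d_in)
out = mha(x)                  # expected shape: (8, 128, 256)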

View File

@@ -99,7 +99,7 @@ def train_model_simple(model, optimizer, device, n_epochs,
         max_length=GPT_CONFIG_124M["ctx_len"],
         stride=GPT_CONFIG_124M["ctx_len"]
     )
-    print(f"Training ...")
+    print("Training ...")
     model.train()
     for input_batch, target_batch in train_loader:
         optimizer.zero_grad()

View File

@@ -9,11 +9,11 @@ from torch.utils.data import Dataset, DataLoader
 import matplotlib.pyplot as plt
 
+
 #####################################
 # Chapter 2
 #####################################
 
 class GPTDatasetV1(Dataset):
-
     def __init__(self, txt, tokenizer, max_length, stride):
         self.tokenizer = tokenizer
@@ -310,5 +310,3 @@ def text_to_token_ids(text, tokenizer):
 def token_ids_to_text(token_ids, tokenizer):
     flat = token_ids.squeeze(0)  # remove batch dimension
     return tokenizer.decode(flat.tolist())
-
-
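These two helpers round-trip between strings and batched token-ID tensors. A hedged usage sketch, assuming a tiktoken GPT-2 tokenizer and that text_to_token_ids adds the batch dimension that token_ids_to_text removes:

import tiktoken
import torch

tokenizer = tiktoken.get_encoding("gpt2")

# Encode, then add a batch dimension (the presumed text_to_token_ids behavior).
token_ids = torch.tensor(tokenizer.encode("Hello, world")).unsqueeze(0)

# token_ids_to_text reverses it: drop the batch dimension and decode.
flat = token_ids.squeeze(0)
print(tokenizer.decode(flat.tolist()))  # -> Hello, world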