fixes for code (#206)

* updated .gitignore

* removed unused GELU import

* fixed model_configs, fixed all tensors on same device

* removed unused tiktoken

* update

* update hparam search

* remove redundant tokenizer argument

---------

Co-authored-by: rasbt <mail@sebastianraschka.com>
Daniel Kleine, 2024-06-12 03:59:48 +02:00, committed by GitHub
parent 1a65020d81
commit dcbdc1d2e5
12 changed files with 33 additions and 46 deletions

.gitignore

@@ -20,6 +20,7 @@ ch07/01_main-chapter-code/loss-plot.pdf
 # Checkpoint files
 appendix-A/01_main-chapter-code/model.pth
 appendix-E/01_main-chapter-code/gpt2
+ch05/01_main-chapter-code/gpt2/
@@ -33,6 +34,7 @@ ch06/02_bonus_additional-experiments/gpt2
 ch06/03_bonus_imdb-classification/gpt2
 ch07/01_main-chapter-code/gpt2-medium355M-sft.pth
+ch07/01_main-chapter-code/gpt2/
 # Datasets
 appendix-E/01_main-chapter-code/sms_spam_collection.zip

@@ -1370,7 +1370,6 @@
 "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
 "    model, train_loader, val_loader, optimizer, device,\n",
 "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-"    tokenizer=tokenizer\n",
 ")\n",
 "\n",
 "end_time = time.time()\n",
@@ -1495,7 +1494,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.6"
+"version": "3.11.4"
 }
 },
 "nbformat": 4,

@@ -484,7 +484,7 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 # Overall the same as `train_model_simple` in chapter 5
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer):
+                            eval_freq, eval_iter):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1

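The removed `tokenizer` parameter above was never referenced inside the function body, so dropping it changes no behavior; only the call sites need updating. A minimal sketch of the updated call, with placeholder hyperparameter values (the model, loaders, and optimizer are assumed to be set up as in the chapter code):

# Hypothetical call after the fix; all names assumed from the surrounding code
train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
    model, train_loader, val_loader, optimizer, device,
    num_epochs=5, eval_freq=50, eval_iter=5,  # no tokenizer argument anymore
)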
@@ -262,7 +262,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 6,
 "id": "5fee2cf5-61c3-4167-81b5-44ea155bbaf2",
 "metadata": {},
 "outputs": [],
@@ -282,13 +282,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 7,
 "id": "5aa1b0c1-d78a-48fc-ad08-4802458b43f7",
 "metadata": {},
 "outputs": [],
 "source": [
 "import torch.nn as nn\n",
-"from gpt import MultiHeadAttention, LayerNorm, GELU, FeedForward\n",
+"from gpt import MultiHeadAttention, LayerNorm, FeedForward\n",
 "\n",
 "\n",
 "class TransformerBlock(nn.Module):\n",
@@ -351,7 +351,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 8,
 "id": "1d013d32-c275-4f42-be21-9010f1537227",
 "metadata": {},
 "outputs": [],

@@ -62,12 +62,10 @@
 "from importlib.metadata import version\n",
 "\n",
 "import matplotlib\n",
-"import tiktoken\n",
 "import torch\n",
 "\n",
 "print(\"thop version:\", version(\"thop\"))\n",
-"print(\"torch version:\", version(\"torch\"))\n",
-"print(\"tiktoken version:\", version(\"tiktoken\"))"
+"print(\"torch version:\", version(\"torch\"))"
 ]
 },
 {

@@ -65,9 +65,9 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"numpy version: 1.25.2\n",
-"torch version: 2.2.1\n",
-"transformers version: 4.33.2\n"
+"numpy version: 1.24.3\n",
+"torch version: 2.3.0\n",
+"transformers version: 4.41.2\n"
 ]
 }
 ],
@@ -85,16 +85,6 @@
 "id": "ffc17d7d-bcd8-42ee-82a9-04fd55acf15d",
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-"  torch.utils._pytree._register_pytree_node(\n",
-"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-"  torch.utils._pytree._register_pytree_node(\n"
-]
-},
 {
 "data": {
 "text/plain": [
@@ -162,10 +152,10 @@
 "}\n",
 "\n",
 "model_configs = {\n",
-"    \"gpt2-small\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
-"    \"gpt2-medium\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
-"    \"gpt2-large\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
-"    \"gpt2-xl\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
+"    \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
+"    \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
+"    \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
+"    \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
 "}\n",
 "\n",
 "\n",
@@ -242,7 +232,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
+"/tmp/ipykernel_9385/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
 "  return torch.nn.Parameter(torch.tensor(right))\n"
 ]
 }
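This hunk only refreshes the recorded output path; the UserWarning itself is left in place. It comes from wrapping an existing tensor in torch.tensor(...) inside the weight-loading helper. Were one to silence it, the fix PyTorch suggests in the warning text would apply (a sketch, not part of this commit; `right` is the tensor being assigned):

return torch.nn.Parameter(right.clone().detach())  # copy without re-wrapping the source tensor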
@@ -255,13 +245,12 @@
 "gpt = GPTModel(BASE_CONFIG)\n",
 "\n",
 "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
-"load_weights(gpt, gpt_hf)\n",
-"gpt.to(device);"
+"load_weights(gpt, gpt_hf)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 9,
 "id": "4ddd0d51-3ade-4890-9bab-d63f141d095f",
 "metadata": {},
 "outputs": [
@@ -285,8 +274,8 @@
 "tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
 "\n",
 "token_ids = generate(\n",
-"    model=gpt,\n",
-"    idx=text_to_token_ids(\"Every effort moves\", tokenizer),\n",
+"    model=gpt.to(device),\n",
+"    idx=text_to_token_ids(\"Every effort moves\", tokenizer).to(device),\n",
 "    max_new_tokens=30,\n",
 "    context_size=BASE_CONFIG[\"context_length\"],\n",
 "    top_k=1,\n",

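The last hunk is the "fixed all tensors on same device" item from the commit message: moving the model with gpt.to(device) helps only if the token IDs passed to generate are moved as well, because a module and its inputs must live on the same device for a forward pass. A self-contained sketch of that rule with a stand-in model (plain PyTorch, nothing book-specific):

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = nn.Linear(8, 2).to(device)  # moves all parameters and buffers once
x = torch.randn(4, 8).to(device)    # every input tensor must follow the model

with torch.no_grad():
    y = model(x)  # works: weights and input share a device
print(y.device)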
@@ -53,8 +53,8 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 def evaluate_model(model, train_loader, val_loader, device, eval_iter):
     model.eval()
     with torch.no_grad():
-        train_loss = calc_loss_loader(train_loader, model, device, num_iters=eval_iter)
-        val_loss = calc_loss_loader(val_loader, model, device, num_iters=eval_iter)
+        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
+        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
     model.train()
     return train_loss, val_loss

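The rename is a bug fix rather than a style change: `calc_loss_loader` takes a `num_batches` keyword, so the old `num_iters=` call would fail with a TypeError at evaluation time. A sketch of the two helpers consistent with how they are called here (this mirrors the book's chapter-5 utilities, reconstructed from the hunk context rather than copied from this commit):

import torch

def calc_loss_batch(input_batch, target_batch, model, device):
    # Move the batch to the model's device, then compute the cross-entropy loss
    input_batch = input_batch.to(device)
    target_batch = target_batch.to(device)
    logits = model(input_batch)
    return torch.nn.functional.cross_entropy(
        logits.flatten(0, 1), target_batch.flatten()
    )

def calc_loss_loader(data_loader, model, device, num_batches=None):
    # Average the per-batch loss over at most `num_batches` batches
    total_loss = 0.0
    if len(data_loader) == 0:
        return float("nan")
    if num_batches is None:
        num_batches = len(data_loader)
    else:
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        total_loss += calc_loss_batch(input_batch, target_batch, model, device).item()
    return total_loss / num_batches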
@@ -40,12 +40,12 @@ class GPTDatasetV1(Dataset):
 def create_dataloader_v1(txt, batch_size=4, max_length=256,
-                         stride=128, shuffle=True, drop_last=True):
+                         stride=128, shuffle=True, drop_last=True, num_workers=0):
     # Initialize the tokenizer
     tokenizer = tiktoken.get_encoding("gpt2")
     # Create dataset
-    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride, num_workers=0)
+    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
     # Create dataloader
     dataloader = DataLoader(

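This hunk fixes two related problems: `num_workers` was not exposed in the signature, and it was being handed to `GPTDatasetV1`, a dataset class, where it does not belong; worker processes are configured on the `DataLoader`. The hunk cuts off at the `DataLoader(` call, so here is a plausible completed version, assuming the `GPTDatasetV1` class from the same file:

import tiktoken
from torch.utils.data import DataLoader

def create_dataloader_v1(txt, batch_size=4, max_length=256,
                         stride=128, shuffle=True, drop_last=True, num_workers=0):
    # Initialize the tokenizer
    tokenizer = tiktoken.get_encoding("gpt2")
    # Create dataset (no num_workers here; that is a DataLoader concern)
    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
    # Create dataloader, forwarding num_workers to the loader's worker processes
    return DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle,
        drop_last=drop_last, num_workers=num_workers
    )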
@@ -1861,7 +1861,7 @@
 "source": [
 "# Overall the same as `train_model_simple` in chapter 5\n",
 "def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,\n",
-"                            eval_freq, eval_iter, tokenizer):\n",
+"                            eval_freq, eval_iter):\n",
 "    # Initialize lists to track losses and examples seen\n",
 "    train_losses, val_losses, train_accs, val_accs = [], [], [], []\n",
 "    examples_seen, global_step = 0, -1\n",
@@ -1982,7 +1982,6 @@
 "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
 "    model, train_loader, val_loader, optimizer, device,\n",
 "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-"    tokenizer=tokenizer\n",
 ")\n",
 "\n",
 "end_time = time.time()\n",
@@ -2371,7 +2370,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.6"
+"version": "3.11.4"
 }
 },
 "nbformat": 4,

@@ -235,7 +235,7 @@ def evaluate_model(model, train_loader, val_loader, device,
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token_pos=-1,
+                            eval_freq, eval_iter, max_steps=None, trainable_token_pos=-1,
                             accumulation_steps=1, ignore_index=-100):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
@@ -565,7 +565,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=args.num_epochs, eval_freq=50, eval_iter=5,
-        tokenizer=tokenizer, max_steps=None, trainable_token_pos=args.trainable_token_pos,
+        max_steps=None, trainable_token_pos=args.trainable_token_pos,
         accumulation_steps=args.accumulation_steps
     )

@@ -110,7 +110,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter):
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None):
+                            eval_freq, eval_iter, max_steps=None):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -279,7 +279,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None
+        max_steps=None
     )
     end_time = time.time()

@@ -139,7 +139,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter, trainable
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token=-1):
+                            eval_freq, eval_iter, max_steps=None, trainable_token=-1):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -344,7 +344,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None, trainable_token=args.trainable_token
+        max_steps=None, trainable_token=args.trainable_token
     )
     end_time = time.time()