Mirror of https://github.com/rasbt/LLMs-from-scratch.git (synced 2025-10-04 12:38:17 +00:00)

Add alternative weight loading strategy as backup (#82)

This commit is contained in:
parent 820d5e3ed1
commit 4582995ced
.gitignore (vendored, 1 line changed)
@@ -1,3 +1,4 @@
+ch05/02_alternative_weight_loading/checkpoints
 ch05/01_main-chapter-code/the-verdict.txt
 
 .DS_Store
ch05/01_main-chapter-code/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# Chapter 5: Pretraining on Unlabeled Data

- [ch05.ipynb](ch05.ipynb) contains all the code as it appears in the chapter
- [previous_chapters.py](previous_chapters.py) is a Python module containing the `MultiHeadAttention` module from the previous chapter, which we import in [ch05.ipynb](ch05.ipynb) to pretrain the GPT model
- [train.py](train.py) is a standalone Python script with the code that we implemented in [ch05.ipynb](ch05.ipynb) to train the GPT model
- [generate.py](generate.py) is a standalone Python script with the code that we implemented in [ch05.ipynb](ch05.ipynb) to load and use the pretrained model weights from OpenAI
ch05/01_main-chapter-code/generate.py

@@ -199,16 +199,17 @@ def main(gpt_config, input_prompt, model_size):
     gpt = GPTModel(gpt_config)
     load_weights_into_gpt(gpt, params)
     gpt.to(device)
     gpt.eval()

     tokenizer = tiktoken.get_encoding("gpt2")

     token_ids = generate(
         model=gpt,
         idx=text_to_token_ids(input_prompt, tokenizer),
-        max_new_tokens=65,
+        max_new_tokens=30,
         context_size=gpt_config["ctx_len"],
-        top_k=50,
-        temperature=1.5
+        top_k=1,
+        temperature=1.0
     )

     print("Output text:\n", token_ids_to_text(token_ids, tokenizer))

@@ -219,7 +220,7 @@ if __name__ == "__main__":
     torch.manual_seed(123)

     CHOOSE_MODEL = "gpt2-small"
-    INPUT_PROMPT = "Every effort moves you"
+    INPUT_PROMPT = "Every effort moves"

     BASE_CONFIG = {
         "vocab_size": 50257,  # Vocabulary size
ch05/02_alternative_weight_loading/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Alternative Weight Loading

This folder contains alternative weight loading strategies in case the weights become unavailable from OpenAI.

- [weight-loading-hf-transformers.ipynb](weight-loading-hf-transformers.ipynb): contains code to load the weights from the Hugging Face Model Hub via the `transformers` library
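For reference, the core loading step in that notebook comes down to a few lines (a minimal sketch; assumes the `transformers` package is installed):

```python
from transformers import GPT2Model

# Downloads the 124M-parameter GPT-2 weights into ./checkpoints on first use
gpt_hf = GPT2Model.from_pretrained("openai-community/gpt2", cache_dir="checkpoints")
gpt_hf.eval()
```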
ch05/02_alternative_weight_loading/previous_chapters.py (new file, 287 lines)
@@ -0,0 +1,287 @@
# This file collects all the relevant code that we covered thus far
# throughout Chapters 2-4.
# This file can be run as a standalone script.

import tiktoken
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

#####################################
# Chapter 2
#####################################

class GPTDatasetV1(Dataset):
    def __init__(self, txt, tokenizer, max_length, stride):
        self.tokenizer = tokenizer
        self.input_ids = []
        self.target_ids = []

        # Tokenize the entire text
        token_ids = tokenizer.encode(txt)

        # Use a sliding window to chunk the book into overlapping sequences of max_length
        for i in range(0, len(token_ids) - max_length, stride):
            input_chunk = token_ids[i:i + max_length]
            target_chunk = token_ids[i + 1: i + max_length + 1]
            self.input_ids.append(torch.tensor(input_chunk))
            self.target_ids.append(torch.tensor(target_chunk))

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        return self.input_ids[idx], self.target_ids[idx]


def create_dataloader_v1(txt, batch_size=4, max_length=256,
                         stride=128, shuffle=True, drop_last=True):
    # Initialize the tokenizer
    tokenizer = tiktoken.get_encoding("gpt2")

    # Create dataset
    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)

    # Create dataloader
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)

    return dataloader
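
# Example usage (illustrative sketch; `raw_text` stands for any text string):
#
#   dataloader = create_dataloader_v1(raw_text, batch_size=4, max_length=256, stride=128)
#   inputs, targets = next(iter(dataloader))
#   print(inputs.shape)  # torch.Size([4, 256]); targets are inputs shifted by one token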


#####################################
# Chapter 3
#####################################
class MultiHeadAttention(nn.Module):
    def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
        super().__init__()
        assert d_out % num_heads == 0, "d_out must be divisible by num_heads"

        self.d_out = d_out
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads  # Reduce the projection dim to match the desired output dim

        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.out_proj = nn.Linear(d_out, d_out)  # Linear layer to combine head outputs
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1))

    def forward(self, x):
        b, num_tokens, d_in = x.shape

        keys = self.W_key(x)  # Shape: (b, num_tokens, d_out)
        queries = self.W_query(x)
        values = self.W_value(x)

        # We implicitly split the matrix by adding a `num_heads` dimension
        # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
        values = values.view(b, num_tokens, self.num_heads, self.head_dim)
        queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)

        # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
        keys = keys.transpose(1, 2)
        queries = queries.transpose(1, 2)
        values = values.transpose(1, 2)

        # Compute scaled dot-product attention (aka self-attention) with a causal mask
        attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head

        # Original mask truncated to the number of tokens and converted to boolean
        mask_bool = self.mask.bool()[:num_tokens, :num_tokens]

        # Use the mask to fill attention scores
        attn_scores.masked_fill_(mask_bool, -torch.inf)

        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.reshape(b, num_tokens, self.d_out)
        context_vec = self.out_proj(context_vec)  # optional projection

        return context_vec
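
# Example usage (illustrative sketch):
#
#   mha = MultiHeadAttention(d_in=768, d_out=768, block_size=1024,
#                            dropout=0.0, num_heads=12)
#   out = mha(torch.rand(2, 5, 768))  # batch of 2, 5 tokens -> shape (2, 5, 768)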


#####################################
# Chapter 4
#####################################
class LayerNorm(nn.Module):
    def __init__(self, emb_dim):
        super().__init__()
        self.eps = 1e-5
        self.scale = nn.Parameter(torch.ones(emb_dim))
        self.shift = nn.Parameter(torch.zeros(emb_dim))

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        var = x.var(dim=-1, keepdim=True, unbiased=False)
        norm_x = (x - mean) / torch.sqrt(var + self.eps)
        return self.scale * norm_x + self.shift
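
# Note (illustrative sketch): since the variance uses `unbiased=False`, this matches
# PyTorch's built-in layer norm with its default affine parameters:
#
#   x = torch.randn(2, 5)
#   torch.allclose(LayerNorm(5)(x), nn.LayerNorm(5, eps=1e-5)(x), atol=1e-5)  # True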


class GELU(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(
            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
            (x + 0.044715 * torch.pow(x, 3))
        ))
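
# Note (illustrative sketch): this is the tanh approximation of GELU, which should
# match PyTorch's built-in version with `approximate="tanh"`:
#
#   x = torch.randn(2, 5)
#   torch.allclose(GELU()(x), nn.functional.gelu(x, approximate="tanh"))  # True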


class FeedForward(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(cfg["emb_dim"], 4 * cfg["emb_dim"]),
            GELU(),
            nn.Linear(4 * cfg["emb_dim"], cfg["emb_dim"]),
            nn.Dropout(cfg["drop_rate"])
        )

    def forward(self, x):
        return self.layers(x)


class TransformerBlock(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.att = MultiHeadAttention(
            d_in=cfg["emb_dim"],
            d_out=cfg["emb_dim"],
            block_size=cfg["ctx_len"],
            num_heads=cfg["n_heads"],
            dropout=cfg["drop_rate"],
            qkv_bias=cfg["qkv_bias"])
        self.ff = FeedForward(cfg)
        self.norm1 = LayerNorm(cfg["emb_dim"])
        self.norm2 = LayerNorm(cfg["emb_dim"])
        self.drop_resid = nn.Dropout(cfg["drop_rate"])

    def forward(self, x):
        # Shortcut connection for attention block
        shortcut = x
        x = self.norm1(x)
        x = self.att(x)   # Shape [batch_size, num_tokens, emb_size]
        x = self.drop_resid(x)
        x = x + shortcut  # Add the original input back

        # Shortcut connection for feed-forward block
        shortcut = x
        x = self.norm2(x)
        x = self.ff(x)
        x = self.drop_resid(x)
        x = x + shortcut  # Add the original input back

        return x


class GPTModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
        self.pos_emb = nn.Embedding(cfg["ctx_len"], cfg["emb_dim"])
        self.drop_emb = nn.Dropout(cfg["drop_rate"])

        self.trf_blocks = nn.Sequential(
            *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])

        self.final_norm = LayerNorm(cfg["emb_dim"])
        self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False)

    def forward(self, in_idx):
        batch_size, seq_len = in_idx.shape
        tok_embeds = self.tok_emb(in_idx)
        pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))
        x = tok_embeds + pos_embeds  # Shape [batch_size, num_tokens, emb_size]
        x = self.drop_emb(x)
        x = self.trf_blocks(x)
        x = self.final_norm(x)
        logits = self.out_head(x)
        return logits
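
# Example usage (illustrative sketch; the config values follow the "gpt2-small"
# setup used in the notebook in this folder):
#
#   cfg = {"vocab_size": 50257, "ctx_len": 1024, "emb_dim": 768, "n_heads": 12,
#          "n_layers": 12, "drop_rate": 0.0, "qkv_bias": True}
#   model = GPTModel(cfg)
#   logits = model(torch.tensor([[6109, 3626, 6100]]))  # -> shape (1, 3, 50257)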


def generate_text_simple(model, idx, max_new_tokens, context_size):
    # idx is (B, T) array of indices in the current context
    for _ in range(max_new_tokens):

        # Crop the current context if it exceeds the supported context size
        # E.g., if the LLM supports only 5 tokens and the context size is 10,
        # then only the last 5 tokens are used as context
        idx_cond = idx[:, -context_size:]

        # Get the predictions
        with torch.no_grad():
            logits = model(idx_cond)

        # Focus only on the last time step
        # (batch, n_tokens, vocab_size) becomes (batch, vocab_size)
        logits = logits[:, -1, :]

        # Get the idx of the vocab entry with the highest logits value
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)

        # Append sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)  # (batch, n_tokens+1)

    return idx
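
# Example usage (illustrative sketch, assuming `model` is a GPTModel instance):
#
#   out = generate_text_simple(model, idx=torch.tensor([[6109, 3626, 6100]]),
#                              max_new_tokens=10, context_size=1024)
#   # `out` has shape (1, 13): the 3 input tokens plus 10 greedily chosen ones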


#####################################
# Chapter 5
#####################################


def text_to_token_ids(text, tokenizer):
    encoded = tokenizer.encode(text)
    encoded_tensor = torch.tensor(encoded).unsqueeze(0)  # add batch dimension
    return encoded_tensor


def token_ids_to_text(token_ids, tokenizer):
    flat = token_ids.squeeze(0)  # remove batch dimension
    return tokenizer.decode(flat.tolist())


def generate(model, idx, max_new_tokens, context_size, temperature, top_k=None):

    # For-loop is the same as before: get logits and focus only on the last time step
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        logits = logits[:, -1, :]

        # New: Filter logits with top_k sampling
        if top_k is not None:
            # Keep only top_k values
            top_logits, _ = torch.topk(logits, top_k)
            min_val = top_logits[:, -1]
            logits = torch.where(logits < min_val, torch.tensor(float('-inf')).to(logits.device), logits)

        # New: Apply temperature scaling
        if temperature > 0.0:
            logits = logits / temperature

            # Apply softmax to get probabilities
            probs = torch.softmax(logits, dim=-1)  # (batch_size, vocab_size)

            # Sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)  # (batch_size, 1)

        # Otherwise same as before: get the idx of the vocab entry with the highest logits value
        else:
            idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch_size, 1)

        # Same as before: append sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)  # (batch_size, num_tokens+1)

    return idx
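
# Example usage (illustrative sketch, mirroring the call in the notebook):
#
#   torch.manual_seed(123)
#   tokenizer = tiktoken.get_encoding("gpt2")
#   token_ids = generate(model, idx=text_to_token_ids("Every effort moves", tokenizer),
#                        max_new_tokens=30, context_size=1024, top_k=1, temperature=1.0)
#   print(token_ids_to_text(token_ids, tokenizer))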
ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb (new file, 312 lines)

@@ -0,0 +1,312 @@
Supplementary code for "Build a Large Language Model From Scratch": https://www.manning.com/books/build-a-large-language-model-from-scratch by Sebastian Raschka. Code repository: https://github.com/rasbt/LLMs-from-scratch

# Bonus Code for Chapter 5

## Alternative Weight Loading from Hugging Face Model Hub using Transformers

- In the main chapter, we loaded the GPT model weights directly from OpenAI
- This notebook provides alternative weight loading code to load the model weights from the [Hugging Face Model Hub](https://huggingface.co/docs/hub/en/models-the-hub) using the `transformers` Python library

In [1]:
```python
# pip install transformers
```

In [2]:
```python
from importlib.metadata import version

pkgs = ["numpy", "torch", "transformers"]
for p in pkgs:
    print(f"{p} version: {version(p)}")
```
```
numpy version: 1.25.2
torch version: 2.2.1
transformers version: 4.33.2
```

In [3]:
```python
from transformers import GPT2Model


# allowed model names
model_names = {
    "gpt2-small": "openai-community/gpt2",          # 124M
    "gpt2-medium": "openai-community/gpt2-medium",  # 355M
    "gpt2-large": "openai-community/gpt2-large",    # 774M
    "gpt2-xl": "openai-community/gpt2-xl"           # 1558M
}

CHOOSE_MODEL = "gpt2-small"

gpt_hf = GPT2Model.from_pretrained(model_names[CHOOSE_MODEL], cache_dir="checkpoints")
gpt_hf.eval()
```
```
/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
  torch.utils._pytree._register_pytree_node(
/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
  torch.utils._pytree._register_pytree_node(
```
```
GPT2Model(
  (wte): Embedding(50257, 768)
  (wpe): Embedding(1024, 768)
  (drop): Dropout(p=0.1, inplace=False)
  (h): ModuleList(
    (0-11): 12 x GPT2Block(
      (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
      (attn): GPT2Attention(
        (c_attn): Conv1D()
        (c_proj): Conv1D()
        (attn_dropout): Dropout(p=0.1, inplace=False)
        (resid_dropout): Dropout(p=0.1, inplace=False)
      )
      (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
      (mlp): GPT2MLP(
        (c_fc): Conv1D()
        (c_proj): Conv1D()
        (act): NewGELUActivation()
        (dropout): Dropout(p=0.1, inplace=False)
      )
    )
  )
  (ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
)
```

In [4]:
```python
BASE_CONFIG = {
    "vocab_size": 50257,  # Vocabulary size
    "ctx_len": 1024,      # Context length
    "drop_rate": 0.0,     # Dropout rate
    "qkv_bias": True      # Query-key-value bias
}

model_configs = {
    "gpt2-small": {"emb_dim": 768, "n_layers": 12, "n_heads": 12},
    "gpt2-medium": {"emb_dim": 1024, "n_layers": 24, "n_heads": 16},
    "gpt2-large": {"emb_dim": 1280, "n_layers": 36, "n_heads": 20},
    "gpt2-xl": {"emb_dim": 1600, "n_layers": 48, "n_heads": 25},
}


BASE_CONFIG.update(model_configs[CHOOSE_MODEL])
```

In [5]:
```python
def assign_check(left, right):
    if left.shape != right.shape:
        raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}")
    return torch.nn.Parameter(torch.tensor(right))
```

In [6]:
```python
import numpy as np


def load_weights(gpt, gpt_hf):

    d = gpt_hf.state_dict()

    gpt.pos_emb.weight = assign_check(gpt.pos_emb.weight, d["wpe.weight"])
    gpt.tok_emb.weight = assign_check(gpt.tok_emb.weight, d["wte.weight"])

    for b in range(BASE_CONFIG["n_layers"]):
        q_w, k_w, v_w = np.split(d[f"h.{b}.attn.c_attn.weight"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.weight = assign_check(gpt.trf_blocks[b].att.W_query.weight, q_w.T)
        gpt.trf_blocks[b].att.W_key.weight = assign_check(gpt.trf_blocks[b].att.W_key.weight, k_w.T)
        gpt.trf_blocks[b].att.W_value.weight = assign_check(gpt.trf_blocks[b].att.W_value.weight, v_w.T)

        q_b, k_b, v_b = np.split(d[f"h.{b}.attn.c_attn.bias"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.bias = assign_check(gpt.trf_blocks[b].att.W_query.bias, q_b)
        gpt.trf_blocks[b].att.W_key.bias = assign_check(gpt.trf_blocks[b].att.W_key.bias, k_b)
        gpt.trf_blocks[b].att.W_value.bias = assign_check(gpt.trf_blocks[b].att.W_value.bias, v_b)

        gpt.trf_blocks[b].att.out_proj.weight = assign_check(gpt.trf_blocks[b].att.out_proj.weight, d[f"h.{b}.attn.c_proj.weight"].T)
        gpt.trf_blocks[b].att.out_proj.bias = assign_check(gpt.trf_blocks[b].att.out_proj.bias, d[f"h.{b}.attn.c_proj.bias"])

        gpt.trf_blocks[b].ff.layers[0].weight = assign_check(gpt.trf_blocks[b].ff.layers[0].weight, d[f"h.{b}.mlp.c_fc.weight"].T)
        gpt.trf_blocks[b].ff.layers[0].bias = assign_check(gpt.trf_blocks[b].ff.layers[0].bias, d[f"h.{b}.mlp.c_fc.bias"])
        gpt.trf_blocks[b].ff.layers[2].weight = assign_check(gpt.trf_blocks[b].ff.layers[2].weight, d[f"h.{b}.mlp.c_proj.weight"].T)
        gpt.trf_blocks[b].ff.layers[2].bias = assign_check(gpt.trf_blocks[b].ff.layers[2].bias, d[f"h.{b}.mlp.c_proj.bias"])

        gpt.trf_blocks[b].norm1.scale = assign_check(gpt.trf_blocks[b].norm1.scale, d[f"h.{b}.ln_1.weight"])
        gpt.trf_blocks[b].norm1.shift = assign_check(gpt.trf_blocks[b].norm1.shift, d[f"h.{b}.ln_1.bias"])
        gpt.trf_blocks[b].norm2.scale = assign_check(gpt.trf_blocks[b].norm2.scale, d[f"h.{b}.ln_2.weight"])
        gpt.trf_blocks[b].norm2.shift = assign_check(gpt.trf_blocks[b].norm2.shift, d[f"h.{b}.ln_2.bias"])

    gpt.final_norm.scale = assign_check(gpt.final_norm.scale, d["ln_f.weight"])
    gpt.final_norm.shift = assign_check(gpt.final_norm.shift, d["ln_f.bias"])
    gpt.out_head.weight = assign_check(gpt.out_head.weight, d["wte.weight"])
```

In [7]:
```python
import torch
from previous_chapters import GPTModel


gpt = GPTModel(BASE_CONFIG)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
load_weights(gpt, gpt_hf)
gpt.to(device);
```
```
/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  return torch.nn.Parameter(torch.tensor(right))
```

In [8]:
```python
import tiktoken
from previous_chapters import generate, text_to_token_ids, token_ids_to_text

torch.manual_seed(123)

tokenizer = tiktoken.get_encoding("gpt2")

token_ids = generate(
    model=gpt,
    idx=text_to_token_ids("Every effort moves", tokenizer),
    max_new_tokens=30,
    context_size=BASE_CONFIG["ctx_len"],
    top_k=1,
    temperature=1.0
)

print("Output text:\n", token_ids_to_text(token_ids, tokenizer))
```
```
Output text:
 Every effort moves forward, but it's not enough.

"I'm not going to sit here and say, 'I'm not going to do this,'
```
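The `UserWarning` emitted while `load_weights` runs comes from calling `torch.tensor()` on an existing tensor inside `assign_check`. A minimal variant that avoids the warning (a sketch, assuming `right` is always a `torch.Tensor` or NumPy array, as it is in `load_weights`):

```python
import torch


def assign_check(left, right):
    if left.shape != right.shape:
        raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}")
    # torch.as_tensor accepts both tensors and NumPy arrays;
    # clone().detach() makes an independent copy without the copy-construct warning
    return torch.nn.Parameter(torch.as_tensor(right).clone().detach())
```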
ch05/README.md

@@ -1,4 +1,6 @@
 # Chapter 5: Pretraining on Unlabeled Data

-- [01_main-chapter-code](01_main-chapter-code) contains the main chapter code.
-- [02_hparam_tuning](02_hparam_tuning) contains an optional hyperparameter tuning script
+- [01_main-chapter-code](01_main-chapter-code) contains the main chapter code
+- [02_alternative_weight_loading](02_alternative_weight_loading) contains code to load the GPT model weights from alternative places in case the model weights become unavailable from OpenAI
+- [03_bonus_pretraining_on_gutenberg](03_bonus_pretraining_on_gutenberg) contains code to pretrain the LLM longer on the whole corpus of books from Project Gutenberg
+- [04_hparam_tuning](04_hparam_tuning) contains an optional hyperparameter tuning script