# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
#   - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.ticker import MaxNLocator

from .ch04 import generate_text_simple


def generate(model, idx, max_new_tokens, context_size, temperature=0.0, top_k=None, eos_id=None):
    # For-loop is the same as before: get logits, and only focus on the last time step
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        logits = logits[:, -1, :]

        # New: filter logits with top_k sampling
        if top_k is not None:
            # Keep only the top_k values; everything below the k-th largest
            # logit is masked with -inf so softmax assigns it zero probability
            top_logits, _ = torch.topk(logits, top_k)
            min_val = top_logits[:, -1:]  # shape (batch_size, 1) so the comparison broadcasts per row
            logits = torch.where(logits < min_val, torch.tensor(float("-inf")).to(logits.device), logits)

        # New: apply temperature scaling
        if temperature > 0.0:
            logits = logits / temperature

            # Apply softmax to get probabilities
            probs = torch.softmax(logits, dim=-1)  # (batch_size, vocab_size)

            # Sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)  # (batch_size, 1)

        # Otherwise same as before: get the idx of the vocab entry with the highest logits value
        else:
            idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch_size, 1)

        if idx_next == eos_id:  # Stop generating early if an end-of-sequence token is encountered (assumes batch size 1)
            break

        # Same as before: append the sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)  # (batch_size, num_tokens+1)

    return idx
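
# A minimal usage sketch (not part of the original file): `model` is assumed to
# be a trained GPTModel instance with a 1024-token context, and `tokenizer` the
# tiktoken "gpt2" encoding used throughout the book.
#
#   token_ids = generate(
#       model=model,
#       idx=text_to_token_ids("Every effort moves you", tokenizer),
#       max_new_tokens=15,
#       context_size=1024,
#       top_k=25,
#       temperature=1.4,
#   )
#   print(token_ids_to_text(token_ids, tokenizer))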


def train_model_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
                       eval_freq, eval_iter, start_context, tokenizer):
    # Initialize lists to track losses and tokens seen
    train_losses, val_losses, track_tokens_seen = [], [], []
    tokens_seen, global_step = 0, -1

    # Main training loop
    for epoch in range(num_epochs):
        model.train()  # Set model to training mode

        for input_batch, target_batch in train_loader:
            optimizer.zero_grad()  # Reset loss gradients from the previous batch iteration
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            loss.backward()  # Calculate loss gradients
            optimizer.step()  # Update model weights using the loss gradients
            tokens_seen += input_batch.numel()
            global_step += 1

            # Optional evaluation step
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(
                    model, train_loader, val_loader, device, eval_iter)
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                track_tokens_seen.append(tokens_seen)
                print(f"Ep {epoch+1} (Step {global_step:06d}): "
                      f"Train loss {train_loss:.3f}, Val loss {val_loss:.3f}")

        # Print a sample text after each epoch
        generate_and_print_sample(
            model, tokenizer, device, start_context
        )

    return train_losses, val_losses, track_tokens_seen
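
# A minimal training sketch (not part of the original file): it assumes a
# GPTModel instance plus train/val data loaders and a tokenizer built as in
# chapter 5; the hyperparameter values are illustrative only.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model.to(device)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=4e-4, weight_decay=0.1)
#   train_losses, val_losses, tokens_seen = train_model_simple(
#       model, train_loader, val_loader, optimizer, device,
#       num_epochs=10, eval_freq=5, eval_iter=5,
#       start_context="Every effort moves you", tokenizer=tokenizer,
#   )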


def evaluate_model(model, train_loader, val_loader, device, eval_iter):
    # Switch to eval mode (disables dropout), compute the average train/val loss
    # over up to eval_iter batches without tracking gradients, then switch back
    model.eval()
    with torch.no_grad():
        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
    model.train()
    return train_loss, val_loss


def generate_and_print_sample(model, tokenizer, device, start_context):
    model.eval()
    # Infer the supported context size from the positional embedding table
    context_size = model.pos_emb.weight.shape[0]
    encoded = text_to_token_ids(start_context, tokenizer).to(device)
    with torch.no_grad():
        token_ids = generate_text_simple(
            model=model, idx=encoded,
            max_new_tokens=50, context_size=context_size
        )
    decoded_text = token_ids_to_text(token_ids, tokenizer)
    print(decoded_text.replace("\n", " "))  # Compact print format
    model.train()


def assign(left, right):
    # Sanity-check that the pretrained tensor matches the model parameter's shape
    if left.shape != right.shape:
        raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}")
    return torch.nn.Parameter(torch.tensor(right))


def load_weights_into_gpt(gpt, params):
    gpt.pos_emb.weight = assign(gpt.pos_emb.weight, params['wpe'])
    gpt.tok_emb.weight = assign(gpt.tok_emb.weight, params['wte'])

    for b in range(len(params["blocks"])):
        # The original checkpoint stores query, key, and value as one fused
        # c_attn matrix; split it into the three separate projection weights
        q_w, k_w, v_w = np.split(
            (params["blocks"][b]["attn"]["c_attn"])["w"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.weight = assign(
            gpt.trf_blocks[b].att.W_query.weight, q_w.T)
        gpt.trf_blocks[b].att.W_key.weight = assign(
            gpt.trf_blocks[b].att.W_key.weight, k_w.T)
        gpt.trf_blocks[b].att.W_value.weight = assign(
            gpt.trf_blocks[b].att.W_value.weight, v_w.T)

        q_b, k_b, v_b = np.split(
            (params["blocks"][b]["attn"]["c_attn"])["b"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.bias = assign(
            gpt.trf_blocks[b].att.W_query.bias, q_b)
        gpt.trf_blocks[b].att.W_key.bias = assign(
            gpt.trf_blocks[b].att.W_key.bias, k_b)
        gpt.trf_blocks[b].att.W_value.bias = assign(
            gpt.trf_blocks[b].att.W_value.bias, v_b)

        gpt.trf_blocks[b].att.out_proj.weight = assign(
            gpt.trf_blocks[b].att.out_proj.weight,
            params["blocks"][b]["attn"]["c_proj"]["w"].T)
        gpt.trf_blocks[b].att.out_proj.bias = assign(
            gpt.trf_blocks[b].att.out_proj.bias,
            params["blocks"][b]["attn"]["c_proj"]["b"])

        gpt.trf_blocks[b].ff.layers[0].weight = assign(
            gpt.trf_blocks[b].ff.layers[0].weight,
            params["blocks"][b]["mlp"]["c_fc"]["w"].T)
        gpt.trf_blocks[b].ff.layers[0].bias = assign(
            gpt.trf_blocks[b].ff.layers[0].bias,
            params["blocks"][b]["mlp"]["c_fc"]["b"])
        gpt.trf_blocks[b].ff.layers[2].weight = assign(
            gpt.trf_blocks[b].ff.layers[2].weight,
            params["blocks"][b]["mlp"]["c_proj"]["w"].T)
        gpt.trf_blocks[b].ff.layers[2].bias = assign(
            gpt.trf_blocks[b].ff.layers[2].bias,
            params["blocks"][b]["mlp"]["c_proj"]["b"])

        gpt.trf_blocks[b].norm1.scale = assign(
            gpt.trf_blocks[b].norm1.scale,
            params["blocks"][b]["ln_1"]["g"])
        gpt.trf_blocks[b].norm1.shift = assign(
            gpt.trf_blocks[b].norm1.shift,
            params["blocks"][b]["ln_1"]["b"])
        gpt.trf_blocks[b].norm2.scale = assign(
            gpt.trf_blocks[b].norm2.scale,
            params["blocks"][b]["ln_2"]["g"])
        gpt.trf_blocks[b].norm2.shift = assign(
            gpt.trf_blocks[b].norm2.shift,
            params["blocks"][b]["ln_2"]["b"])

    gpt.final_norm.scale = assign(gpt.final_norm.scale, params["g"])
    gpt.final_norm.shift = assign(gpt.final_norm.shift, params["b"])
    # Weight tying: the original GPT-2 reuses the token embedding matrix as the
    # output projection, so the same "wte" array initializes both
    gpt.out_head.weight = assign(gpt.out_head.weight, params["wte"])
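
# A minimal loading sketch (not part of the original file): in the book's
# chapter 5 code, `params` comes from the `download_and_load_gpt2` helper in
# gpt_download.py, which returns the pretrained OpenAI GPT-2 weights as nested
# NumPy arrays.
#
#   settings, params = download_and_load_gpt2(model_size="124M", models_dir="gpt2")
#   load_weights_into_gpt(gpt, params)
#   gpt.eval()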


def text_to_token_ids(text, tokenizer):
    encoded = tokenizer.encode(text, allowed_special={"<|endoftext|>"})
    encoded_tensor = torch.tensor(encoded).unsqueeze(0)  # add batch dimension
    return encoded_tensor


def token_ids_to_text(token_ids, tokenizer):
    flat = token_ids.squeeze(0)  # remove batch dimension
    return tokenizer.decode(flat.tolist())
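
# A quick round-trip sketch (not part of the original file), assuming the
# tiktoken BPE tokenizer used throughout the book:
#
#   import tiktoken
#   tokenizer = tiktoken.get_encoding("gpt2")
#   ids = text_to_token_ids("Every effort moves you", tokenizer)  # shape (1, num_tokens)
#   assert token_ids_to_text(ids, tokenizer) == "Every effort moves you"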


def calc_loss_batch(input_batch, target_batch, model, device):
    input_batch, target_batch = input_batch.to(device), target_batch.to(device)
    logits = model(input_batch)
    # Flatten batch and sequence dimensions: cross_entropy expects
    # (num_examples, vocab_size) logits and (num_examples,) targets
    loss = torch.nn.functional.cross_entropy(logits.flatten(0, 1), target_batch.flatten())
    return loss


def calc_loss_loader(data_loader, model, device, num_batches=None):
    total_loss = 0.
    if len(data_loader) == 0:
        return float("nan")
    elif num_batches is None:
        num_batches = len(data_loader)
    else:
        # Reduce the number of batches to match the total number of batches
        # in the data loader if num_batches exceeds it
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i < num_batches:
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            total_loss += loss.item()
        else:
            break
    return total_loss / num_batches
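
# Usage sketch (not part of the original file): estimate the average loss over
# the first five batches of a loader, e.g. before any training:
#
#   with torch.no_grad():
#       initial_loss = calc_loss_loader(train_loader, model, device, num_batches=5)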


def plot_losses(epochs_seen, tokens_seen, train_losses, val_losses):
    fig, ax1 = plt.subplots(figsize=(5, 3))

    # Plot training and validation loss against epochs
    ax1.plot(epochs_seen, train_losses, label="Training loss")
    ax1.plot(epochs_seen, val_losses, linestyle="-.", label="Validation loss")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("Loss")
    ax1.legend(loc="upper right")
    ax1.xaxis.set_major_locator(MaxNLocator(integer=True))  # only show integer labels on the x-axis

    # Create a second x-axis for tokens seen
    ax2 = ax1.twiny()  # Create a second x-axis that shares the same y-axis
    ax2.plot(tokens_seen, train_losses, alpha=0)  # Invisible plot for aligning ticks
    ax2.set_xlabel("Tokens seen")

    fig.tight_layout()  # Adjust layout to make room
    plt.savefig("loss-plot.pdf")
    plt.show()
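
# Plotting sketch (not part of the original file), using the values returned
# by train_model_simple above:
#
#   epochs_tensor = torch.linspace(0, num_epochs, len(train_losses))
#   plot_losses(epochs_tensor, tokens_seen, train_losses, val_losses)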