# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
#   - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch
#
# This file contains the relevant code from chapter 3 that is going to be used
# in forthcoming chapters.

import torch
import torch.nn as nn


class CausalAttention(nn.Module):
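    """Single-head self-attention with a causal mask and dropout on the attention weights.

    Maps an input of shape (b, num_tokens, d_in) to context vectors of shape
    (b, num_tokens, d_out); each position can only attend to itself and to
    earlier positions.
    """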
    def __init__(self, d_in, d_out, context_length, dropout, qkv_bias=False):
        super().__init__()
        self.d_out = d_out
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.dropout = nn.Dropout(dropout)  # New
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))  # New

    def forward(self, x):
        b, num_tokens, d_in = x.shape  # New batch dimension b
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        attn_scores = queries @ keys.transpose(1, 2)  # Changed transpose
        attn_scores.masked_fill_(  # New, _ ops are in-place
            self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)  # New

        context_vec = attn_weights @ values
        return context_vec


class MultiHeadAttentionWrapper(nn.Module):
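    """Multi-head attention built from a stack of independent CausalAttention heads.

    The per-head outputs (each of size d_out) are concatenated along the last
    dimension and passed through a final linear layer, so the output size is
    d_out * num_heads.
    """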
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        self.heads = nn.ModuleList(
            [CausalAttention(d_in, d_out, context_length, dropout, qkv_bias)
             for _ in range(num_heads)]
        )
        self.out_proj = nn.Linear(d_out*num_heads, d_out*num_heads)

    def forward(self, x):
        context_vec = torch.cat([head(x) for head in self.heads], dim=-1)
        return self.out_proj(context_vec)


class MultiHeadAttention(nn.Module):
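    """Multi-head causal self-attention computed with a single set of Q/K/V projections.

    Functionally comparable to MultiHeadAttentionWrapper, but all heads are
    processed in one batched matrix multiplication. d_out must be divisible by
    num_heads; each head operates on head_dim = d_out // num_heads features.
    """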
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        assert d_out % num_heads == 0, "d_out must be divisible by num_heads"

        self.d_out = d_out
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads  # Reduce the projection dim to match desired output dim

        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.out_proj = nn.Linear(d_out, d_out)  # Linear layer to combine head outputs
        self.dropout = nn.Dropout(dropout)
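        # Registering the mask as a buffer (rather than a parameter) keeps it out of
        # gradient updates while still moving it with the module, e.g. via .to(device).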
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))

    def forward(self, x):
        b, num_tokens, d_in = x.shape

        keys = self.W_key(x)  # Shape: (b, num_tokens, d_out)
        queries = self.W_query(x)
        values = self.W_value(x)

        # We implicitly split the matrix by adding a `num_heads` dimension
        # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
        values = values.view(b, num_tokens, self.num_heads, self.head_dim)
        queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)

        # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
        keys = keys.transpose(1, 2)
        queries = queries.transpose(1, 2)
        values = values.transpose(1, 2)

        # Compute scaled dot-product attention (aka self-attention) with a causal mask
        attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head

        # Original mask truncated to the number of tokens and converted to boolean
        mask_bool = self.mask.bool()[:num_tokens, :num_tokens]

        # Use the mask to fill attention scores
        attn_scores.masked_fill_(mask_bool, -torch.inf)

        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
        context_vec = self.out_proj(context_vec)  # optional projection

        return context_vec
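

# The following block is not part of the original chapter code; it is a minimal
# usage sketch (with arbitrary example dimensions) that shows how the classes
# above can be exercised and what output shapes to expect.
if __name__ == "__main__":
    torch.manual_seed(123)

    batch_size, context_length, d_in, d_out = 2, 6, 3, 4
    example_batch = torch.rand(batch_size, context_length, d_in)

    ca = CausalAttention(d_in, d_out, context_length, dropout=0.0)
    print("CausalAttention output:", ca(example_batch).shape)        # (2, 6, 4)

    mha_wrapper = MultiHeadAttentionWrapper(
        d_in, d_out, context_length, dropout=0.0, num_heads=2)
    print("Wrapper output:", mha_wrapper(example_batch).shape)       # (2, 6, 8)

    mha = MultiHeadAttention(d_in, d_out, context_length, dropout=0.0, num_heads=2)
    print("MultiHeadAttention output:", mha(example_batch).shape)    # (2, 6, 4)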