Mirror of https://github.com/rasbt/LLMs-from-scratch.git (synced 2025-11-01 10:20:00 +00:00)
Einsum multi-head attention (#345)
* Einsum multi-head attention
* update diff
parent 190910e3d6
commit 9bb203b1b7
@@ -2,4 +2,25 @@
- [mha-implementations.ipynb](mha-implementations.ipynb) contains and compares different implementations of multi-head attention
<a href="mha-implementations.ipynb"><img src="https://sebastianraschka.com/images/LLMs-from-scratch-images/bonus/mha-benchmark/mha-comparison.webp" width="500px"></a>
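The commit title points to an einsum-based variant of multi-head attention. Since the notebook diff itself is suppressed further below, here is a minimal, hedged sketch of what a multi-head attention forward pass written with `torch.einsum` can look like; the class name, argument names, and sizes are illustrative and not the notebook's actual code.

```python
# Hedged sketch of an einsum-based multi-head attention module.
# Names (EinsumAttention, d_in, d_out, ...) are illustrative placeholders.
import torch
import torch.nn as nn


class EinsumAttention(nn.Module):
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.out_proj = nn.Linear(d_out, d_out)
        self.dropout = nn.Dropout(dropout)
        self.register_buffer("mask", torch.triu(torch.ones(context_length, context_length), diagonal=1))

    def forward(self, x):
        b, n, _ = x.shape
        # Project and split into heads: (b, n, num_heads, head_dim)
        q = self.W_query(x).view(b, n, self.num_heads, self.head_dim)
        k = self.W_key(x).view(b, n, self.num_heads, self.head_dim)
        v = self.W_value(x).view(b, n, self.num_heads, self.head_dim)

        # Per-head attention scores via einsum: (b, num_heads, n, n)
        scores = torch.einsum("bqhd,bkhd->bhqk", q, k) / self.head_dim**0.5
        scores.masked_fill_(self.mask.bool()[:n, :n], -torch.inf)  # causal mask
        weights = self.dropout(torch.softmax(scores, dim=-1))

        # Weighted sum of values, then merge heads back to (b, n, d_out)
        context = torch.einsum("bhqk,bkhd->bqhd", weights, v).reshape(b, n, self.num_heads * self.head_dim)
        return self.out_proj(context)
```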
### Summary
The figures below summarize the performance benchmarks (lower is better).
#### Forward pass only
<a href="mha-implementations.ipynb"><img src="https://sebastianraschka.com/images/LLMs-from-scratch-images/bonus/mha-benchmark/1_forward-only.webp?1" width="500px"></a>
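The actual benchmark harness lives in the notebook and is not part of this diff. The following is only a rough sketch of how a forward-only timing could be collected on CPU, assuming the `MultiHeadAttention` class from the chapter 3 module shown further below and placeholder tensor sizes; on a GPU one would also call `torch.cuda.synchronize()` before reading the timer.

```python
# Hedged forward-only timing sketch; sizes are placeholders, not the notebook's settings.
import time
import torch

batch_size, context_length, d_in, d_out, num_heads = 8, 1024, 768, 768, 12
mha = MultiHeadAttention(d_in, d_out, context_length, dropout=0.0, num_heads=num_heads)
x = torch.randn(batch_size, context_length, d_in)

with torch.no_grad():
    for _ in range(3):          # warm-up iterations
        mha(x)
    start = time.perf_counter()
    for _ in range(10):
        mha(x)
print(f"forward only: {(time.perf_counter() - start) / 10:.4f} s/iter")
```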
#### Forward and backward pass
<a href="mha-implementations.ipynb"><img src="https://sebastianraschka.com/images/LLMs-from-scratch-images/bonus/mha-benchmark/2_forward-and-backward.webp?1" width="500px"></a>
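Continuing the sketch above, a forward-and-backward measurement adds a dummy scalar loss so that `backward()` has something to backpropagate; again, this only illustrates the idea of the measurement, not the notebook's code.

```python
# Hedged forward-and-backward timing sketch (reuses `mha` and `x` from the sketch above)
start = time.perf_counter()
for _ in range(10):
    out = mha(x)
    loss = out.sum()            # dummy scalar loss
    loss.backward()
    mha.zero_grad()
print(f"forward + backward: {(time.perf_counter() - start) / 10:.4f} s/iter")
```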
#### Forward and backward pass after compilation
<a href="mha-implementations.ipynb"><img src="https://sebastianraschka.com/images/LLMs-from-scratch-images/bonus/mha-benchmark/3_forward-and-backward-compiled.webp?1" width="500px"></a>
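The compiled variant presumably wraps the module with `torch.compile` (available since PyTorch 2.0); the exact compilation settings used in the notebook are not shown in this diff. A hedged continuation of the timing sketch above:

```python
# Hedged sketch: compile the module and exclude the first (compiling) call from the timing
compiled_mha = torch.compile(mha)

out = compiled_mha(x)           # first call triggers compilation
out.sum().backward()
mha.zero_grad()

start = time.perf_counter()
for _ in range(10):
    out = compiled_mha(x)
    out.sum().backward()
    mha.zero_grad()
print(f"compiled forward + backward: {(time.perf_counter() - start) / 10:.4f} s/iter")
```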
@@ -1,108 +0,0 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
#   - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch
#
# This file contains the relevant code from chapter 3 that is going to be used
# in forthcoming chapters.

import torch
import torch.nn as nn


class CausalAttention(nn.Module):

    def __init__(self, d_in, d_out, context_length, dropout, qkv_bias=False):
        super().__init__()
        self.d_out = d_out
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.dropout = nn.Dropout(dropout)  # New
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))  # New

    def forward(self, x):
        b, num_tokens, d_in = x.shape  # New batch dimension b
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        attn_scores = queries @ keys.transpose(1, 2)  # Changed transpose
        attn_scores.masked_fill_(  # New, _ ops are in-place
            self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)  # New

        context_vec = attn_weights @ values
        return context_vec


class MultiHeadAttentionWrapper(nn.Module):

    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        self.heads = nn.ModuleList(
            [CausalAttention(d_in, d_out, context_length, dropout, qkv_bias)
             for _ in range(num_heads)]
        )
        self.out_proj = nn.Linear(d_out*num_heads, d_out*num_heads)

    def forward(self, x):
        context_vec = torch.cat([head(x) for head in self.heads], dim=-1)
        return self.out_proj(context_vec)


class MultiHeadAttention(nn.Module):
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        assert d_out % num_heads == 0, "d_out must be divisible by num_heads"

        self.d_out = d_out
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads  # Reduce the projection dim to match desired output dim

        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.out_proj = nn.Linear(d_out, d_out)  # Linear layer to combine head outputs
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))

    def forward(self, x):
        b, num_tokens, d_in = x.shape

        keys = self.W_key(x)  # Shape: (b, num_tokens, d_out)
        queries = self.W_query(x)
        values = self.W_value(x)

        # We implicitly split the matrix by adding a `num_heads` dimension
        # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
        values = values.view(b, num_tokens, self.num_heads, self.head_dim)
        queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)

        # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
        keys = keys.transpose(1, 2)
        queries = queries.transpose(1, 2)
        values = values.transpose(1, 2)

        # Compute scaled dot-product attention (aka self-attention) with a causal mask
        attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head

        # Original mask truncated to the number of tokens and converted to boolean
        mask_bool = self.mask.bool()[:num_tokens, :num_tokens]

        # Use the mask to fill attention scores
        attn_scores.masked_fill_(mask_bool, -torch.inf)

        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
        context_vec = self.out_proj(context_vec)  # optional projection

        return context_vec
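For reference, a quick usage sketch of the `MultiHeadAttention` class defined above; the tensor sizes here are illustrative only and not taken from the book or the notebook.

```python
# Minimal usage sketch with placeholder sizes
torch.manual_seed(123)

x = torch.randn(2, 6, 768)             # (batch, num_tokens, d_in)
mha = MultiHeadAttention(d_in=768, d_out=768, context_length=1024,
                         dropout=0.0, num_heads=12)
context_vec = mha(x)
print(context_vec.shape)               # expected: torch.Size([2, 6, 768])
```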
File diff suppressed because one or more lines are too long