Mirror of https://github.com/rasbt/LLMs-from-scratch.git, synced 2025-09-09 00:06:22 +00:00
Add new experiment without padding

parent b8451e5077
commit 04b9540938
@@ -9,18 +9,19 @@ For example,

-| | Model | Weights | Trainable token | Trainable layers | Context length | Training acc | Validation acc | Test acc | Training time | CPU/GPU |
-|----|--------------------|------------|-----------------|------------------|-------------------------|--------------|----------------|----------|---------------|---------|
-| 1 | gpt2-small (124M) | pretrained | last | last_block | longest train ex. (120) | 96.63% | 99.33% | 95.00% | 0.39 min | V100 |
-| 2 | gpt2-small (124M) | pretrained | first | last_block | longest train ex. (120) | 78.46% | 80.54% | 75.00% | 0.37 min | V100 |
-| 3 | gpt2-small (124M) | pretrained | last | last_layer | longest train ex. (120) | 78.65% | 79.87% | 72.00% | 0.33 min | V100 |
-| 4 | gpt2-small (124M) | pretrained | last | all | longest train ex. (120) | 99.62% | 96.64% | 96.67% | 0.94 min | V100 |
-| 5 | gpt2-medium (355M) | pretrained | last | last_block | longest train ex. (120) | 87.50% | 91.28% | 84.67% | 0.91 min | V100 |
-| 6 | gpt2-large (774M) | pretrained | last | last_block | longest train ex. (120) | 99.52% | 98.66% | 96.67% | 1.91 min | V100 |
-| 7 | gpt2-xl (1558M) | pretrained | last | last_block | longest train ex. (120) | 99.81% | 99.33% | 98.33% | 3.84 min | V100 |
-| 8 | gpt2-small (124M) | random | last | all | longest train ex. (120) | 100% | 96.64% | 93.67% | 0.93 min | V100 |
-| 9 | gpt2-small (124M) | pretrained | last | LoRA | longest train ex. (120) | 99.52% | 97.99% | 97.67% | 0.82 min | V100 |
-| 10 | gpt2-small (124M) | pretrained | last | last_block | context length (1024) | 83.08% | 87.92% | 78.33% | 3.24 min | V100 |
+| | Model | Weights | Trainable token | Trainable layers | Context length | Training acc | Validation acc | Test acc | Training time | CPU/GPU |
+| ---- | ------------------ | ---------- | --------------- | ---------------- | ----------------------- | ------------ | -------------- | -------- | ------------- | ------- |
+| 1 | gpt2-small (124M) | pretrained | last | last_block | longest train ex. (120) | 96.63% | 99.33% | 95.00% | 0.28 min | A100 |
+| 2 | gpt2-small (124M) | pretrained | first | last_block | longest train ex. (120) | 78.46% | 80.54% | 75.00% | 0.28 min | A100 |
+| 3 | gpt2-small (124M) | pretrained | last | last_layer | longest train ex. (120) | 78.65% | 79.87% | 72.00% | 0.25 min | A100 |
+| 4 | gpt2-small (124M) | pretrained | last | all | longest train ex. (120) | 99.62% | 96.64% | 96.67% | 0.69 min | A100 |
+| 5 | gpt2-medium (355M) | pretrained | last | last_block | longest train ex. (120) | 87.50% | 91.28% | 84.67% | 0.75 min | A100 |
+| 6 | gpt2-large (774M) | pretrained | last | last_block | longest train ex. (120) | 99.52% | 98.66% | 96.67% | 1.50 min | A100 |
+| 7 | gpt2-xl (1558M) | pretrained | last | last_block | longest train ex. (120) | 99.81% | 99.33% | 98.33% | 2.83 min | A100 |
+| 8 | gpt2-small (124M) | random | last | all | longest train ex. (120) | 100% | 96.64% | 93.67% | 0.69 min | A100 |
+| 9 | gpt2-small (124M) | pretrained | last | LoRA | longest train ex. (120) | 99.52% | 97.99% | 97.67% | 0.75 min | A100 |
+| 10 | gpt2-small (124M) | pretrained | last | last_block | context length (1024) | 83.08% | 87.92% | 78.33% | 2.46 min | A100 |
+| 11 | gpt2-small (124M) | pretrained | last | last_block | variable: no padding | 97.42% | 95.30% | 95.00% | 1.71 min | A100 |
@@ -39,6 +40,7 @@ You can use the following code to reproduce the experiments:

- Row 8: `python additional-experiments.py --weights random --trainable_layers all`
- Row 9: `python additional-experiments.py --trainable_layers lora --lora_rank 16 --lora_alpha 8`
- Row 10: `python additional-experiments.py --context_length "model_context_length"`
+- Row 11: `python additional-experiments.py --no_padding`

I've kept the LLM and dataset small on purpose, so you can run the training on a regular laptop like a MacBook Air M3 in about 15 minutes in case you don't have access to a GPU.

@@ -59,3 +61,5 @@ I've kept the LLM and dataset small on purpose, so you can run the training on a

6. **Using LoRA (Low-Rank Adaptation) vs Training All Layers (Row 9 vs. 4)**: Keeping the model frozen and adding trainable LoRA layers (see [Appendix E](../../appendix-E/01_main-chapter-code/appendix-E.ipynb) for details) is a viable alternative to training all model parameters and even improves the performance by 1 percentage point (a rough sketch of the LoRA idea follows this list). As suggested by the roughly 1-percentage-point smaller gap between training and validation accuracy with LoRA, this is likely due to less overfitting. Moreover, using LoRA is slightly faster because fewer parameters have to be updated.

7. **Padding Input to Full Context Length vs. Longest Training Example (Row 1 vs. 10)**: Padding the input to the full supported context length results in significantly worse performance.

+8. **Padding vs no padding (Row 1 vs. 11)**: The `--no_padding` option disables the padding in the dataset and trains the model with a batch size of 1, where the inputs have variable lengths (a toy illustration follows this list). This results in exactly the same test set accuracy but takes substantially longer to train.
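
To make point 6 concrete, here is a minimal, self-contained sketch of the LoRA idea, loosely in the spirit of the `LinearWithLoRA` class that appears in the script diff below and in Appendix E. The initialization and wiring are simplified and may differ from the repository's implementation; `rank=16` and `alpha=8` simply mirror the Row 9 command:

```python
import torch


class LoRALayer(torch.nn.Module):
    # Trainable low-rank update: alpha * (x @ A @ B)
    def __init__(self, in_dim, out_dim, rank, alpha):
        super().__init__()
        self.A = torch.nn.Parameter(torch.randn(in_dim, rank) * 0.01)
        self.B = torch.nn.Parameter(torch.zeros(rank, out_dim))  # zero init: no change at start
        self.alpha = alpha

    def forward(self, x):
        return self.alpha * (x @ self.A @ self.B)


class LinearWithLoRA(torch.nn.Module):
    # Wraps a frozen nn.Linear and adds the trainable low-rank path on top
    def __init__(self, linear, rank, alpha):
        super().__init__()
        self.linear = linear
        self.lora = LoRALayer(linear.in_features, linear.out_features, rank, alpha)

    def forward(self, x):
        return self.linear(x) + self.lora(x)


# Example: freeze a pretrained layer and add the LoRA path
layer = torch.nn.Linear(768, 768)
layer.requires_grad_(False)                    # frozen pretrained weights
lora_layer = LinearWithLoRA(layer, rank=16, alpha=8)
out = lora_layer(torch.randn(2, 768))          # only A and B receive gradients
print(out.shape)  # torch.Size([2, 768])
```

Because only the small `A` and `B` matrices are trained, far fewer parameters are updated per step, which is consistent with the slightly shorter training time in Row 9.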
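To illustrate point 8: without padding, PyTorch's default `collate_fn` cannot stack sequences of unequal length into a single batch tensor, which is why the `--no_padding` run falls back to a batch size of 1. The toy dataset below is purely illustrative (it is not part of the repository):

```python
import torch
from torch.utils.data import Dataset, DataLoader


# Hypothetical toy dataset: token sequences of different lengths, no padding applied
class VariableLengthDataset(Dataset):
    def __init__(self, sequences, labels):
        self.sequences = sequences
        self.labels = labels

    def __getitem__(self, index):
        return torch.tensor(self.sequences[index]), torch.tensor(self.labels[index])

    def __len__(self):
        return len(self.labels)


data = VariableLengthDataset([[1, 2, 3], [4, 5], [6, 7, 8, 9]], [0, 1, 0])

# batch_size=1 is required here: the default collate_fn cannot stack
# tensors of unequal length into one batch tensor
loader = DataLoader(data, batch_size=1, shuffle=True)

for inputs, label in loader:
    print(inputs.shape, label.shape)  # e.g. torch.Size([1, 3]) torch.Size([1])
```

Processing one variable-length example at a time avoids wasted computation on pad tokens but gives up batch parallelism, which explains the longer training time in Row 11.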
@@ -46,7 +46,7 @@ class LinearWithLoRA(torch.nn.Module):
 
 
 class SpamDataset(Dataset):
-    def __init__(self, csv_file, tokenizer, max_length=None, pad_token_id=50256):
+    def __init__(self, csv_file, tokenizer, max_length=None, pad_token_id=50256, skip_padding=False):
         self.data = pd.read_csv(csv_file)
         self.max_length = max_length if max_length is not None else self._longest_encoded_length(tokenizer)
@@ -55,11 +55,13 @@ class SpamDataset(Dataset):
             tokenizer.encode(text)[:self.max_length]
             for text in self.data["Text"]
         ]
-        # Pad sequences to the longest sequence
-        self.encoded_texts = [
-            et + [pad_token_id] * (self.max_length - len(et))
-            for et in self.encoded_texts
-        ]
+
+        if not skip_padding:
+            # Pad sequences to the longest sequence
+            self.encoded_texts = [
+                et + [pad_token_id] * (self.max_length - len(et))
+                for et in self.encoded_texts
+            ]
 
     def __getitem__(self, index):
         encoded = self.encoded_texts[index]
@@ -334,6 +336,23 @@ if __name__ == "__main__":
             "The LoRA alpha value when choosing `--trainable_layers lora`"
         )
     )
+    parser.add_argument(
+        "--no_padding",
+        action='store_true',
+        default=False,
+        help=(
+            "Disable padding. When this flag is set, the model is trained"
+            " with a batch size of 1 and variable-length (unpadded) inputs."
+        )
+    )
+    parser.add_argument(
+        "--num_epochs",
+        type=int,
+        default=5,
+        help=(
+            "Number of training epochs."
+        )
+    )
 
     args = parser.parse_args()
 
@@ -411,26 +430,35 @@ if __name__ == "__main__":
     tokenizer = tiktoken.get_encoding("gpt2")
 
-    if args.context_length == "model_context_length":
-        max_length = model.pos_emb.weight.shape[0]
-    elif args.context_length == "longest_training_example":
-        train_dataset = SpamDataset(base_path / "train.csv", max_length=None, tokenizer=tokenizer)
-        max_length = train_dataset.max_length
-    else:
-        try:
-            max_length = int(args.context_length)
-        except ValueError:
-            raise ValueError("Invalid --context_length argument")
+    train_dataset = None
+    if args.no_padding:
+        max_length = None
+    else:
+        if args.context_length == "model_context_length":
+            max_length = model.pos_emb.weight.shape[0]
+        elif args.context_length == "longest_training_example":
+            train_dataset = SpamDataset(base_path / "train.csv", max_length=None, tokenizer=tokenizer)
+            max_length = train_dataset.max_length
+        else:
+            try:
+                max_length = int(args.context_length)
+            except ValueError:
+                raise ValueError("Invalid --context_length argument")
 
-    train_dataset = SpamDataset(base_path / "train.csv", max_length=max_length, tokenizer=tokenizer)
-    val_dataset = SpamDataset(base_path / "validation.csv", max_length=max_length, tokenizer=tokenizer)
-    test_dataset = SpamDataset(base_path / "test.csv", max_length=max_length, tokenizer=tokenizer)
+    if train_dataset is None:
+        train_dataset = SpamDataset(base_path / "train.csv", max_length=max_length, tokenizer=tokenizer, skip_padding=args.no_padding)
+    val_dataset = SpamDataset(base_path / "validation.csv", max_length=max_length, tokenizer=tokenizer, skip_padding=args.no_padding)
+    test_dataset = SpamDataset(base_path / "test.csv", max_length=max_length, tokenizer=tokenizer, skip_padding=args.no_padding)
 
     tokenizer = tiktoken.get_encoding("gpt2")
 
     num_workers = 0
-    batch_size = 8
+
+    if args.no_padding:
+        batch_size = 1
+    else:
+        batch_size = 8
 
     train_loader = DataLoader(
         dataset=train_dataset,
@@ -462,10 +490,9 @@ if __name__ == "__main__":
     torch.manual_seed(123)
     optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)
 
-    num_epochs = 5
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
-        num_epochs=num_epochs, eval_freq=50, eval_iter=5,
+        num_epochs=args.num_epochs, eval_freq=50, eval_iter=5,
         tokenizer=tokenizer, max_steps=None, trainable_token=args.trainable_token
     )