{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<table style=\"width:100%\">\n",
"<tr>\n",
"<td style=\"vertical-align:middle; text-align:left;\">\n",
"<font size=\"2\">\n",
"Supplementary code for the <a href=\"http://mng.bz/orYv\">Build a Large Language Model From Scratch</a> book by <a href=\"https://sebastianraschka.com\">Sebastian Raschka</a><br>\n",
"<br>Code repository: <a href=\"https://github.com/rasbt/LLMs-from-scratch\">https://github.com/rasbt/LLMs-from-scratch</a>\n",
"</font>\n",
"</td>\n",
"<td style=\"vertical-align:middle; text-align:left;\">\n",
"<a href=\"http://mng.bz/orYv\"><img src=\"https://sebastianraschka.com/images/LLMs-from-scratch-images/cover-small.webp\" width=\"100px\"></a>\n",
"</td>\n",
"</tr>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## FLOPS Analysis"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- FLOPs (Floating Point Operations Per Second) measure the computational complexity of neural network models by counting the number of floating-point operations executed\n",
"- High FLOPs indicate more intensive computation and energy consumption"
]
},
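{
"cell_type": "markdown",
"metadata": {},
"source": [
"- As a quick plausibility check (a minimal sketch, not part of the original notebook), the FLOP count of multiplying an $m \\times k$ matrix by a $k \\times n$ matrix is approximately $2\\,m\\,k\\,n$: each of the $m \\cdot n$ output entries takes $k$ multiplies and $k$ adds"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: analytic FLOP count for a single matrix multiplication\n",
"# (each multiply-accumulate pair counts as 2 FLOPs: one multiply, one add)\n",
"m, k, n = 1024, 768, 768 # e.g., 1024 tokens through a 768x768 weight matrix\n",
"matmul_flops = 2 * m * k * n\n",
"print(f\"{matmul_flops:.1e} FLOPs\") # 1.2e+09"
]
},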
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# pip install -r requirements-extra.txt"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"thop version: 0.1.1-2209072238\n",
"torch version: 2.2.2\n",
"tiktoken version: 0.5.1\n"
]
}
],
"source": [
"from importlib.metadata import version\n",
"\n",
"import matplotlib\n",
"import torch\n",
"\n",
"print(\"thop version:\", version(\"thop\"))\n",
"print(\"torch version:\", version(\"torch\"))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "GerIdRMXd6g9",
"outputId": "ccdd5c71-d221-4a84-f9bc-09557e77162d"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"gpt-small (124M) : 5.1e+11 FLOPS\n",
"gpt-medium (355M) : 1.4e+12 FLOPS\n",
"gpt-large (774M) : 3.2e+12 FLOPS\n",
"gpt-xl (1558M) : 6.4e+12 FLOPS\n"
]
}
],
"source": [
"import torch\n",
"from thop import profile\n",
"\n",
"from previous_chapters import GPTModel\n",
"\n",
"\n",
"BASE_CONFIG = {\n",
" \"vocab_size\": 50257, # Vocabulary size\n",
" \"context_length\": 1024, # Context length\n",
" \"drop_rate\": 0.0, # Dropout rate\n",
" \"qkv_bias\": True # Query-key-value bias\n",
"}\n",
"\n",
"model_configs = {\n",
" \"gpt-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
" \"gpt-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
" \"gpt-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
" \"gpt-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
"}\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"input_tensor = torch.randint(0, 50257, (2, 1024)).to(device)\n",
"\n",
"for size in model_configs:\n",
" BASE_CONFIG.update(model_configs[size])\n",
" \n",
" model = GPTModel(BASE_CONFIG).bfloat16()\n",
" model.to(device)\n",
"\n",
" # MACS = multiply-accumulate operations\n",
" # MACS are typically counted as two FLOPS (one multiply and one accumulate)\n",
" macs, params = profile(model, inputs=(input_tensor,), verbose=False)\n",
" flops = 2*macs\n",
" print(f\"{size:18}: {flops:.1e} FLOPS\")\n",
" \n",
" del model\n",
" torch.cuda.empty_cache()"
]
},
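{
"cell_type": "markdown",
"metadata": {},
"source": [
"- As a rough cross-check (a sketch, not part of the original notebook), the forward pass of a transformer is often approximated as $2 \\cdot n_{\\text{params}} \\cdot n_{\\text{tokens}}$ FLOPs, i.e., one multiply and one add per weight per processed token\n",
"- With the (2, 1024) input above, $n_{\\text{tokens}} = 2048$, so the 124M model yields $2 \\cdot 124 \\cdot 10^6 \\cdot 2048 \\approx 5.1 \\cdot 10^{11}$ FLOPs, in line with the profiled value"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Rule-of-thumb estimate (~2 FLOPs per parameter per token, forward pass only)\n",
"# to compare against the thop measurements above\n",
"batch_size, context_length = 2, 1024\n",
"num_tokens = batch_size * context_length\n",
"\n",
"param_counts = {\n",
" \"gpt-small (124M)\": 124e6,\n",
" \"gpt-medium (355M)\": 355e6,\n",
" \"gpt-large (774M)\": 774e6,\n",
" \"gpt-xl (1558M)\": 1558e6,\n",
"}\n",
"\n",
"for size, n_params in param_counts.items():\n",
" approx_flops = 2 * n_params * num_tokens\n",
" print(f\"{size:18}: {approx_flops:.1e} FLOPs (estimate)\")"
]
}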
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "A100",
"machine_shape": "hm",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}