diff --git a/ch02/01_main-chapter-code/ch02.ipynb b/ch02/01_main-chapter-code/ch02.ipynb
index 902e7d1..48761cb 100644
--- a/ch02/01_main-chapter-code/ch02.ipynb
+++ b/ch02/01_main-chapter-code/ch02.ipynb
@@ -1880,10 +1880,18 @@
    "id": "8b3293a6-45a5-47cd-aa00-b23e3ca0a73f",
    "metadata": {},
    "source": [
-    "**See the [./dataloader.ipynb](./dataloader.ipynb) code notebook**, which is a concise version of the data loader that we implemented in this chapter and will need for training the GPT model in upcoming chapters.\n",
+    "See the [./dataloader.ipynb](./dataloader.ipynb) code notebook, which is a concise version of the data loader that we implemented in this chapter and will need for training the GPT model in upcoming chapters.\n",
     "\n",
-    "**See [./exercise-solutions.ipynb](./exercise-solutions.ipynb) for the exercise solutions.**"
+    "See [./exercise-solutions.ipynb](./exercise-solutions.ipynb) for the exercise solutions."
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c6aa436b-c9ff-4f01-9a58-5c377b3700a5",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
diff --git a/ch03/01_main-chapter-code/ch03.ipynb b/ch03/01_main-chapter-code/ch03.ipynb
index e1f0bbd..2c74134 100644
--- a/ch03/01_main-chapter-code/ch03.ipynb
+++ b/ch03/01_main-chapter-code/ch03.ipynb
@@ -2007,7 +2007,8 @@
    "id": "fa3e4113-ffca-432c-b3ec-7a50bd15da25",
    "metadata": {},
    "source": [
-    "- See the [./multihead-attention.ipynb](./multihead-attention.ipynb) code notebook, which is a concise version of the data loader (chapter 2) plus the multi-head attention class that we implemented in this chapter and will need for training the GPT model in upcoming chapters"
+    "- See the [./multihead-attention.ipynb](./multihead-attention.ipynb) code notebook, which is a concise version of the data loader (chapter 2) plus the multi-head attention class that we implemented in this chapter and will need for training the GPT model in upcoming chapters\n",
+    "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)"
    ]
   }
  ],
diff --git a/ch04/01_main-chapter-code/ch04.ipynb b/ch04/01_main-chapter-code/ch04.ipynb
index 876d269..de2cd77 100644
--- a/ch04/01_main-chapter-code/ch04.ipynb
+++ b/ch04/01_main-chapter-code/ch04.ipynb
@@ -1480,6 +1480,17 @@
     "- Note that the model is untrained; hence the random output texts above\n",
     "- We will train the model in the next chapter"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a35278b6-9e5c-480f-83e5-011a1173648f",
+   "metadata": {},
+   "source": [
+    "## Summary and takeaways\n",
+    "\n",
+    "- See the [./gpt.py](./gpt.py) script, a self-contained script containing the GPT model we implement in this Jupyter notebook\n",
+    "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)"
+   ]
   }
  ],
  "metadata": {
diff --git a/ch05/01_main-chapter-code/ch05.ipynb b/ch05/01_main-chapter-code/ch05.ipynb
index 17138c4..4cfb388 100644
--- a/ch05/01_main-chapter-code/ch05.ipynb
+++ b/ch05/01_main-chapter-code/ch05.ipynb
@@ -2430,9 +2430,9 @@
    "id": "fc7ed189-a633-458c-bf12-4f70b42684b8",
    "metadata": {},
    "source": [
-    "- See the [gpt_train.py](gpt_train.py) script containing a self-contained training script\n",
-    "- The [gpt_generate.py](gpt_generate.py) script loads pretrained weights from OpenAI and generates text based on a prompt\n",
-    "- You can find the exercise solutions in [exercise-solutions.ipynb](exercise-solutions.ipynb)"
+    "- See the [./gpt_train.py](./gpt_train.py) script, a self-contained script for training\n",
+    "- The [./gpt_generate.py](./gpt_generate.py) script loads pretrained weights from OpenAI and generates text based on a prompt\n",
+    "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)"
    ]
   }
  ],
diff --git a/ch06/01_main-chapter-code/ch06.ipynb b/ch06/01_main-chapter-code/ch06.ipynb
index 1d10ebc..647ab01 100644
--- a/ch06/01_main-chapter-code/ch06.ipynb
+++ b/ch06/01_main-chapter-code/ch06.ipynb
@@ -2345,7 +2345,9 @@
    "id": "dafdc910-d616-47ab-aa85-f90c6e7ed80e",
    "metadata": {},
    "source": [
-    "- Interested readers can find an introduction to parameter-efficient training with low-rank adaptation (LoRA) in appendix E\n"
+    "- See the [./gpt_class_finetune.py](./gpt_class_finetune.py) script, a self-contained script for classification finetuning\n",
+    "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)\n",
+    "- In addition, interested readers can find an introduction to parameter-efficient training with low-rank adaptation (LoRA) in [appendix E](../../appendix-E)"
    ]
   }
  ],
@@ -2370,7 +2372,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.11"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
diff --git a/ch07/01_main-chapter-code/ch07.ipynb b/ch07/01_main-chapter-code/ch07.ipynb
index a522f09..fb820dc 100644
--- a/ch07/01_main-chapter-code/ch07.ipynb
+++ b/ch07/01_main-chapter-code/ch07.ipynb
@@ -2699,9 +2699,12 @@
    "id": "f9853e7f-a81a-4806-9728-be1690807185"
   },
   "source": [
-    "## Summary\n",
-    "\n",
-    "- No code in this section"
+    "## Summary and takeaways\n",
+    "\n",
+    "- See the [./gpt_instruction_finetuning.py](./gpt_instruction_finetuning.py) script, a self-contained script for instruction finetuning\n",
+    "- [./ollama_evaluate.py](./ollama_evaluate.py) is a standalone script based on section 7.8 that evaluates a JSON file containing \"output\" and \"response\" keys via Ollama and Llama 3\n",
+    "- The [./load-finetuned-model.ipynb](./load-finetuned-model.ipynb) notebook illustrates how to load the finetuned model in a new session\n",
+    "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)"
    ]
   }
  ],
@@ -2727,7 +2730,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.11"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
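For context on the `dataloader.ipynb` notebook referenced in the ch02 hunk above: it condenses the chapter's sliding-window data loader, which turns a token stream into input/target pairs shifted by one position. Below is a minimal sketch of that idea, assuming the book's `GPTDatasetV1`/`create_dataloader_v1` naming and a GPT-2 tiktoken tokenizer; treat it as an approximation, not the notebook's exact code.

```python
# Sketch: sliding-window dataset for next-token prediction (assumed to
# mirror the chapter 2 data loader; names follow the book's convention).
import tiktoken
import torch
from torch.utils.data import Dataset, DataLoader

class GPTDatasetV1(Dataset):
    def __init__(self, txt, tokenizer, max_length, stride):
        token_ids = tokenizer.encode(txt)
        self.input_ids, self.target_ids = [], []
        # Slide a max_length-token window over the text, stepping by stride;
        # the target is the same window shifted right by one token.
        for i in range(0, len(token_ids) - max_length, stride):
            self.input_ids.append(torch.tensor(token_ids[i:i + max_length]))
            self.target_ids.append(torch.tensor(token_ids[i + 1:i + max_length + 1]))

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        return self.input_ids[idx], self.target_ids[idx]

def create_dataloader_v1(txt, batch_size=4, max_length=256, stride=128, shuffle=True):
    tokenizer = tiktoken.get_encoding("gpt2")  # GPT-2 BPE tokenizer
    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=True)
```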
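Similarly, the new ch07 bullet describes `ollama_evaluate.py` as scoring a JSON file of entries with "output" (reference) and "response" (model) keys via Ollama and Llama 3. A minimal sketch of that flow follows, assuming a locally running `ollama serve` with the `llama3` model pulled; the helper names (`query_model`, `score_entries`) and the scoring prompt are illustrative, not the script's actual code.

```python
# Sketch: score model responses against reference outputs via a local
# Ollama server (endpoint and payload per Ollama's /api/chat API).
import json
import urllib.request

def query_model(prompt, model="llama3", url="http://localhost:11434/api/chat"):
    # Non-streaming chat request; Ollama returns a single JSON object
    # whose "message"/"content" field holds the assistant reply.
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
    }).encode("utf-8")
    request = urllib.request.Request(
        url, data=payload, headers={"Content-Type": "application/json"}
    )
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read())["message"]["content"]

def score_entries(json_path):
    with open(json_path, "r") as f:
        entries = json.load(f)  # list of dicts with "output" and "response" keys
    scores = []
    for entry in entries:
        prompt = (
            f"Given the reference answer `{entry['output']}`, "
            f"score the model response `{entry['response']}` "
            "on a scale from 0 to 100. Respond with the integer only."
        )
        reply = query_model(prompt).strip()
        if reply.isdigit():  # skip replies that are not a bare integer
            scores.append(int(reply))
    return scores
```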