diff --git a/ch06/01_main-chapter-code/ch06.ipynb b/ch06/01_main-chapter-code/ch06.ipynb
index 647ab01..d784657 100644
--- a/ch06/01_main-chapter-code/ch06.ipynb
+++ b/ch06/01_main-chapter-code/ch06.ipynb
@@ -2345,7 +2345,7 @@
    "id": "dafdc910-d616-47ab-aa85-f90c6e7ed80e",
    "metadata": {},
    "source": [
-    "- See the [./gpt_class_fintune.py](./gpt_class_fintune.py) script, a self-contained script for classification finetuning\n",
+    "- See the [./gpt_class_finetune.py](./gpt_class_finetune.py) script, a self-contained script for classification finetuning\n",
     "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)\n",
     "- In addition, interested readers can find an introduction to parameter-efficient training with low-rank adaptation (LoRA) in [appendix E](../../appendix-E)"
    ]
diff --git a/ch07/01_main-chapter-code/ch07.ipynb b/ch07/01_main-chapter-code/ch07.ipynb
index fb820dc..1e1976b 100644
--- a/ch07/01_main-chapter-code/ch07.ipynb
+++ b/ch07/01_main-chapter-code/ch07.ipynb
@@ -2701,7 +2701,7 @@
    "source": [
     "## Summary and takeaways\n",
     "\n",
-    "- See the [./gpt_class_fintune.py](./gpt_class_fintune.py) script, a self-contained script for classification finetuning\n",
+    "- See the [./gpt_instruction_finetuning.py](./gpt_instruction_finetuning.py) script, a self-contained script for instruction finetuning\n",
     "- [./ollama_evaluate.py](./ollama_evaluate.py) is a standalonw script based on section 7.8 that evaluates a JSON file containing \"output\" and \"response\" keys via Ollama and Llama 3\n",
     "- The [./load-finetuned-model.ipynb](./load-finetuned-model.ipynb) notebook illustrates how to load the finetuned model in a new session\n",
     "- You can find the exercise solutions in [./exercise-solutions.ipynb](./exercise-solutions.ipynb)"