diff --git a/appendix-E/01_main-chapter-code/appendix-E.ipynb b/appendix-E/01_main-chapter-code/appendix-E.ipynb
index 16a94b8..b9f02c9 100644
--- a/appendix-E/01_main-chapter-code/appendix-E.ipynb
+++ b/appendix-E/01_main-chapter-code/appendix-E.ipynb
@@ -776,7 +776,7 @@
     "id": "b8b6819e-ef7a-4f0d-841a-1b467496bef9"
    },
    "source": [
-    "- As we can see, we reduced the number of trainable parameters by almost 100x when using LoRA\n",
+    "- As we can see, we reduced the number of trainable parameters by almost 50x when using LoRA\n",
     "- Let's now double-check whether the layers have been modified as intended by printing the model architecture"
    ]
   },
@@ -1474,14 +1474,6 @@
    "source": [
     "- As we can see based on the relatively high accuracy values above, the LoRA finetuning was successful"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "baa472da-44cf-42a9-8e59-6ddf7979bcd5",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
 "metadata": {
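
The corrected figure (a roughly 50x rather than 100x reduction in trainable parameters) can be sanity-checked with a small standalone script. The sketch below is not the notebook's exact code; the class and function names (`LoRALayer`, `LinearWithLoRA`, `replace_linear_with_lora`) and the toy model are assumptions for illustration, so the printed ratio will differ from the notebook's ~50x, which is measured on the full GPT-2 model. It only demonstrates the mechanism: freeze the base weights, wrap each `nn.Linear` with a low-rank adapter, and compare trainable-parameter counts before and after.

```python
# Minimal sketch: verify how LoRA shrinks the number of trainable parameters.
# Names and the toy model below are illustrative, not the notebook's exact code.
import math
import torch
import torch.nn as nn

class LoRALayer(nn.Module):
    """Trainable low-rank update alpha * (x @ A @ B) added to a frozen layer's output."""
    def __init__(self, in_dim, out_dim, rank, alpha):
        super().__init__()
        self.A = nn.Parameter(torch.randn(in_dim, rank) / math.sqrt(rank))
        self.B = nn.Parameter(torch.zeros(rank, out_dim))
        self.alpha = alpha

    def forward(self, x):
        return self.alpha * (x @ self.A @ self.B)

class LinearWithLoRA(nn.Module):
    """Wraps a frozen nn.Linear and adds the trainable LoRA path to its output."""
    def __init__(self, linear, rank, alpha):
        super().__init__()
        self.linear = linear
        self.lora = LoRALayer(linear.in_features, linear.out_features, rank, alpha)

    def forward(self, x):
        return self.linear(x) + self.lora(x)

def replace_linear_with_lora(module, rank, alpha):
    """Recursively swap every nn.Linear submodule for a LinearWithLoRA wrapper."""
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, name, LinearWithLoRA(child, rank, alpha))
        else:
            replace_linear_with_lora(child, rank, alpha)

def count_trainable(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# Toy stand-in for the GPT model used in the notebook
model = nn.Sequential(nn.Linear(768, 3072), nn.GELU(), nn.Linear(3072, 768))

before = count_trainable(model)
for param in model.parameters():          # freeze all original weights
    param.requires_grad = False
replace_linear_with_lora(model, rank=16, alpha=16)
after = count_trainable(model)

print(f"Trainable parameters before LoRA: {before:,}")
print(f"Trainable parameters after LoRA:  {after:,}")
print(f"Reduction factor:                 {before / after:.1f}x")
```

The reduction factor depends on the layer shapes and the chosen rank: each wrapped layer contributes only `rank * (in_dim + out_dim)` trainable parameters instead of `in_dim * out_dim + out_dim`, which is where the order-of-magnitude savings reported in the notebook comes from.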