Mirror of https://github.com/rasbt/LLMs-from-scratch.git (synced 2025-11-08 13:57:19 +00:00)
commit f3a2e93160
parent 0d48725b5c

    100x -> 50x
@@ -776,7 +776,7 @@
    "id": "b8b6819e-ef7a-4f0d-841a-1b467496bef9"
   },
   "source": [
-    "- As we can see, we reduced the number of trainable parameters by almost 100x when using LoRA\n",
+    "- As we can see, we reduced the number of trainable parameters by almost 50x when using LoRA\n",
    "- Let's now double-check whether the layers have been modified as intended by printing the model architecture"
   ]
  },
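For reference on the ratio corrected above: LoRA freezes each original Linear layer and trains only two small matrices A (d_in x r) and B (r x d_out), so the trainable count per layer drops from d_in*d_out (plus bias) to r*(d_in + d_out). Below is a minimal, self-contained Python sketch of the parameter comparison; it is not the notebook's exact code, and the class and helper names are illustrative.

import torch
import torch.nn as nn

class LinearWithLoRA(nn.Module):
    # Illustrative LoRA wrapper: the original Linear is frozen,
    # and only the low-rank matrices A and B receive gradients.
    def __init__(self, linear, rank=16, alpha=16):
        super().__init__()
        self.linear = linear
        for p in self.linear.parameters():
            p.requires_grad = False          # freeze pretrained weights
        d_in, d_out = linear.in_features, linear.out_features
        self.A = nn.Parameter(torch.randn(d_in, rank) * 0.01)  # trainable
        self.B = nn.Parameter(torch.zeros(rank, d_out))        # trainable
        self.alpha = alpha

    def forward(self, x):
        # Original projection plus the scaled low-rank update
        return self.linear(x) + self.alpha * (x @ self.A @ self.B)

def count_trainable(module):
    return sum(p.numel() for p in module.parameters() if p.requires_grad)

full = nn.Linear(768, 768)
lora = LinearWithLoRA(nn.Linear(768, 768), rank=16)
print(count_trainable(full))   # 590592 = 768*768 weights + 768 bias
print(count_trainable(lora))   # 24576  = 768*16 for A + 16*768 for B

For a single 768x768 layer this gives roughly a 24x reduction; the whole-model factor depends on which layers are wrapped and on the chosen rank, consistent with the corrected "almost 50x" above.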
@@ -1474,14 +1474,6 @@
    "source": [
     "- As we can see based on the relatively high accuracy values above, the LoRA finetuning was successful"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "baa472da-44cf-42a9-8e59-6ddf7979bcd5",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
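The "relatively high accuracy values" referenced in this hunk come from an evaluation step earlier in the notebook. As a hedged sketch of how such a classification-accuracy check is typically computed: the names below are placeholders, model, val_loader, and device are assumed to exist from earlier cells, and the classification logits are assumed to be taken from the last token position.

import torch

@torch.no_grad()
def classification_accuracy(model, loader, device):
    model.eval()
    correct, total = 0, 0
    for inputs, labels in loader:
        inputs, labels = inputs.to(device), labels.to(device)
        logits = model(inputs)[:, -1, :]      # assumed: last-token logits
        preds = torch.argmax(logits, dim=-1)
        correct += (preds == labels).sum().item()
        total += labels.numel()
    return correct / total

# Example usage (placeholder names):
# acc = classification_accuracy(model, val_loader, device)
# print(f"Validation accuracy: {acc*100:.2f}%")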