Print out embeddings for more illustrative learning (#481)

* print out embeddings for illustrative learning

* suggestion: print embedding contents

---------

Co-authored-by: rasbt <mail@sebastianraschka.com>
Henry Shi 2025-01-13 12:44:06 -08:00 committed by GitHub
parent bed5f89378
commit 15af754304

@@ -1788,7 +1788,10 @@
 ],
 "source": [
 "token_embeddings = token_embedding_layer(inputs)\n",
-"print(token_embeddings.shape)"
+"print(token_embeddings.shape)\n",
+"\n",
+"# uncomment & execute the following line to see what the embeddings look like\n",
+"# print(token_embeddings)"
 ]
},
{
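For reference, a minimal standalone sketch of what the newly suggested print would show; the hyperparameters (vocab_size=50257, output_dim=256) and the token IDs are assumptions for illustration, not taken from this diff:

import torch

torch.manual_seed(123)
vocab_size, output_dim = 50257, 256             # assumed GPT-2-style values
token_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)

inputs = torch.tensor([[40, 367, 2885, 1464]])  # hypothetical batch of token IDs
token_embeddings = token_embedding_layer(inputs)
print(token_embeddings.shape)                   # torch.Size([1, 4, 256])
print(token_embeddings)                         # the tensor the uncommented line would display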
@@ -1807,7 +1810,10 @@
 "outputs": [],
 "source": [
 "context_length = max_length\n",
-"pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)"
+"pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)\n",
+"\n",
+"# uncomment & execute the following line to see what the embedding layer weights look like\n",
+"# print(pos_embedding_layer.weight)"
 ]
},
{
@@ -1826,7 +1832,10 @@
 ],
 "source": [
 "pos_embeddings = pos_embedding_layer(torch.arange(max_length))\n",
-"print(pos_embeddings.shape)"
+"print(pos_embeddings.shape)\n",
+"\n",
+"# uncomment & execute the following line to see what the embeddings look like\n",
+"# print(pos_embeddings)"
 ]
},
{
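Likewise, a small sketch of the positional-embedding prints suggested above, assuming context_length = max_length = 4 and output_dim = 256; indexing the layer with torch.arange(max_length) simply returns its full weight matrix:

import torch

torch.manual_seed(123)
context_length = max_length = 4                 # assumed value
output_dim = 256
pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)

print(pos_embedding_layer.weight)               # one learnable row per position

pos_embeddings = pos_embedding_layer(torch.arange(max_length))
print(pos_embeddings.shape)                     # torch.Size([4, 256])
print(pos_embeddings)                           # same values as the weight matrix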
@@ -1853,7 +1862,10 @@
 ],
 "source": [
 "input_embeddings = token_embeddings + pos_embeddings\n",
-"print(input_embeddings.shape)"
+"print(input_embeddings.shape)\n",
+"\n",
+"# uncomment & execute the following line to see how the embeddings look like\n",
+"# print(input_embeddings)"
 ]
},
{
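Finally, a sketch of the combined step under the same assumed shapes; token_embeddings of shape (batch, max_length, output_dim) and pos_embeddings of shape (max_length, output_dim) are added via broadcasting over the batch dimension:

import torch

torch.manual_seed(123)
vocab_size, output_dim, max_length = 50257, 256, 4  # assumed values
token_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)
pos_embedding_layer = torch.nn.Embedding(max_length, output_dim)

inputs = torch.tensor([[40, 367, 2885, 1464]])      # hypothetical token IDs
token_embeddings = token_embedding_layer(inputs)                # shape (1, 4, 256)
pos_embeddings = pos_embedding_layer(torch.arange(max_length))  # shape (4, 256)

input_embeddings = token_embeddings + pos_embeddings
print(input_embeddings.shape)                       # torch.Size([1, 4, 256])
print(input_embeddings)                             # the tensor the uncommented line would display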