ouput -> output

This commit is contained in:
rasbt 2024-05-05 12:21:10 -05:00
parent b3215e3351
commit 6f486460bc
No known key found for this signature in database
GPG Key ID: 3C6E5C7C075611DB
4 changed files with 8 additions and 8 deletions

View File

@@ -466,7 +466,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
for i, (input_batch, target_batch) in enumerate(data_loader):
if i < num_batches:
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, -1, :] # Logits of last ouput token
logits = model(input_batch)[:, -1, :] # Logits of last output token
predicted_labels = torch.argmax(logits, dim=-1)
num_examples += predicted_labels.shape[0]
@@ -478,7 +478,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
def calc_loss_batch(input_batch, target_batch, model, device):
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, -1, :] # Logits of last ouput token
logits = model(input_batch)[:, -1, :] # Logits of last output token
loss = torch.nn.functional.cross_entropy(logits, target_batch)
return loss

View File

@@ -139,7 +139,7 @@ def instantiate_model(choose_model, load_weights):
def calc_loss_batch(input_batch, target_batch, model, device, trainable_token=-1):
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, trainable_token, :] # Logits of last ouput token
logits = model(input_batch)[:, trainable_token, :] # Logits of last output token
loss = torch.nn.functional.cross_entropy(logits, target_batch)
return loss
@@ -175,7 +175,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None, trainable
for i, (input_batch, target_batch) in enumerate(data_loader):
if i < num_batches:
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, trainable_token, :] # Logits of last ouput token
logits = model(input_batch)[:, trainable_token, :] # Logits of last output token
predicted_labels = torch.argmax(logits, dim=-1)
num_examples += predicted_labels.shape[0]

View File

@@ -54,7 +54,7 @@ class IMDBDataset(Dataset):
def calc_loss_batch(input_batch, target_batch, model, device):
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
# logits = model(input_batch)[:, -1, :] # Logits of last ouput token
# logits = model(input_batch)[:, -1, :] # Logits of last output token
logits = model(input_batch).logits
loss = torch.nn.functional.cross_entropy(logits, target_batch)
return loss
@@ -90,7 +90,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
for i, (input_batch, target_batch) in enumerate(data_loader):
if i < num_batches:
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
# logits = model(input_batch)[:, -1, :] # Logits of last ouput token
# logits = model(input_batch)[:, -1, :] # Logits of last output token
logits = model(input_batch).logits
predicted_labels = torch.argmax(logits, dim=1)
num_examples += predicted_labels.shape[0]

View File

@@ -83,7 +83,7 @@ def instantiate_model(choose_model, load_weights):
def calc_loss_batch(input_batch, target_batch, model, device, trainable_token=-1):
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, trainable_token, :] # Logits of last ouput token
logits = model(input_batch)[:, trainable_token, :] # Logits of last output token
loss = torch.nn.functional.cross_entropy(logits, target_batch)
return loss
@@ -119,7 +119,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None, trainable
for i, (input_batch, target_batch) in enumerate(data_loader):
if i < num_batches:
input_batch, target_batch = input_batch.to(device), target_batch.to(device)
logits = model(input_batch)[:, trainable_token, :] # Logits of last ouput token
logits = model(input_batch)[:, trainable_token, :] # Logits of last output token
predicted_labels = torch.argmax(logits, dim=-1)
num_examples += predicted_labels.shape[0]