Version of Python¶
!python -V
Python 3.12.6
Import Required Packages¶
# Suppress warnings
import warnings
for warn in [UserWarning, FutureWarning]:
    warnings.filterwarnings("ignore", category = warn)
import os
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
import jupyterlab as jlab
from dataclasses import dataclass
Versions of Required Libraries¶
packages = [
    "Torch", "NumPy", "Pandas", "JupyterLab",
]
package_objects = [
    torch, np, pd, jlab
]
versions = list(map(lambda obj: obj.__version__, package_objects))
pkgs = {"Package": packages, "Version": versions}
df_pkgs = pd.DataFrame(data = pkgs)
df_pkgs.index.name = "#"
df_pkgs.index += 1
display(df_pkgs)
path_to_reqs = "."
reqs_name = "requirements.txt"
def get_packages_and_versions():
    """Generate strings with libraries and their versions in the format: package==version"""
    for package, version in zip(packages, versions):
        yield f"{package.lower()}=={version}\n"
with open(os.path.join(path_to_reqs, reqs_name), "w", encoding = "utf-8") as f:
    f.writelines(get_packages_and_versions())
| # | Package | Version |
| --- | --- | --- |
| 1 | Torch | 2.2.2 |
| 2 | NumPy | 1.26.4 |
| 3 | Pandas | 2.2.3 |
| 4 | JupyterLab | 4.2.5 |
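The generated requirements.txt can then be used to recreate the environment, for example (assuming pip is available in the notebook environment):
# Install the pinned package versions from the generated file
!pip install -r requirements.txt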
L1Loss (L1-Norm or Mean Absolute Error)¶
Regression
Measures the mean absolute difference between the predicted and true values of the neural network model.
# Create an L1Loss object
loss = nn.L1Loss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[0.5, -0.3, 1.2], [0.0, -1.5, 2.1]], requires_grad = True) # torch.randn(2, 3)
target = torch.tensor([[0.0, 0.0, 1.0], [0.0, 1.0, -1.0]]) # torch.randn(2, 3)
# Calculating the Loss Function
output = loss(input, target)
print("Mean Absolute Error:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Mean Absolute Error: 1.100000023841858
Gradients on input data: tensor([[ 0.1667, -0.1667, 0.1667], [ 0.0000, -0.1667, 0.1667]])
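As a sanity check, the same value can be computed directly from the definition (a minimal sketch, reusing the input and target tensors above):
# Manual check: mean of the absolute differences, should match the L1Loss value above
manual_mae = (input - target).abs().mean()
print("Manual Mean Absolute Error:", manual_mae.item())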
MSELoss (Mean Squared Error Loss)¶
Regression
Measures the mean squared error between the predicted and true values of the neural network model.
# Create an MSELoss object
loss = nn.MSELoss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[0.5, -0.3, 1.2], [0.0, -1.5, 2.1]], requires_grad = True) # torch.randn(2, 3)
target = torch.tensor([[0.0, 0.0, 1.0], [0.0, 1.0, -1.0]]) # torch.randn(2, 3)
# Calculating the Loss Function
output = loss(input, target)
print("Mean Squared Error:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Mean Squared Error: 2.706666946411133
Gradients on input data: tensor([[ 0.1667, -0.1000, 0.0667], [ 0.0000, -0.8333, 1.0333]])
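The same kind of check for MSELoss, reusing the tensors above:
# Manual check: mean of the squared differences, should match the MSELoss value above
manual_mse = ((input - target) ** 2).mean()
print("Manual Mean Squared Error:", manual_mse.item())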
PoissonNLLLoss¶
Regression
Measures the negative log-likelihood of the predicted values, assuming the target labels follow a Poisson distribution.
# Create a PoissonNLLLoss object
loss = nn.PoissonNLLLoss(log_input = True, full = False, eps = 1e-8, reduction = "mean")
# Generate input data and target labels
input = torch.tensor([[0.5, -0.3, 1.2], [0.0, -1.5, 2.1]], requires_grad = True) # torch.randn(2, 3)
target = torch.tensor([[0.0, 0.0, 1.0], [0.0, 1.0, -1.0]]) # torch.randn(2, 3)
# Calculating the Loss Function
output = loss(input, target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 2.916492462158203
Gradients on input data: tensor([[ 0.2748, 0.1235, 0.3867], [ 0.1667, -0.1295, 1.5277]])
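With log_input = True and full = False, the loss reduces to exp(input) - target * input averaged over all elements (the eps argument is not used in this mode). A quick manual check, reusing the tensors above:
# Manual check: Poisson NLL with log-space input and the constant Stirling term omitted
manual_poisson = (torch.exp(input) - target * input).mean()
print("Manual Poisson NLL:", manual_poisson.item())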
GaussianNLLLoss¶
Regression
Measures the negative log-likelihood given the predicted values and variances, assuming the target labels follow a normal (Gaussian) distribution.
# Create a GaussianNLLLoss object
loss = nn.GaussianNLLLoss(full = False, eps = 1e-6, reduction = "mean")
# Generate input data and target labels
input = torch.tensor([[2.5, 0.0], [0.0, 1.0], [1.0, 2.0]], requires_grad = True)
target = torch.tensor([[3.0, 0.0], [0.0, 2.0], [1.0, 2.0]])
var = torch.tensor([[0.5, 0.2], [0.1, 0.3], [0.2, 0.3]])
# Calculating the Loss Function
output = loss(input, target, var)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: -0.39910173416137695
Gradients on input data: tensor([[-0.1667, 0.0000], [ 0.0000, -0.5556], [ 0.0000, 0.0000]])
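With full = False, the per-element loss is 0.5 * (log(var) + (input - target)^2 / var), with the variance clamped from below by eps and the result averaged. A quick manual check, reusing the tensors above:
# Manual check: Gaussian NLL without the constant term, variance clamped by eps
var_clamped = var.clamp(min = 1e-6)
manual_gauss = (0.5 * (var_clamped.log() + (input - target) ** 2 / var_clamped)).mean()
print("Manual Gaussian NLL:", manual_gauss.item())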
KLDivLoss (Kullback-Leibler Divergence Loss)¶
Regression
Measures the Kullback-Leibler divergence between two probability distributions.
# Create a LogSoftmax object
m = nn.LogSoftmax(dim = 1)
# Create a KLDivLoss object
loss = nn.KLDivLoss(reduction = "batchmean")
# Generate input data and target labels
input = torch.tensor([[0.2, 0.3, 0.5], [0.1, 0.8, 0.1]], requires_grad = True)
target = torch.tensor([[0.1, 0.4, 0.5], [0.2, 0.7, 0.1]])
# Calculating the Loss Function
output = loss(m(input), target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.1021953895688057
Gradients on input data: tensor([[ 0.0947, -0.0401, -0.0547], [ 0.0246, -0.0991, 0.0746]])
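KLDivLoss expects log-probabilities as input and, by default, plain probabilities as target; with reduction = "batchmean" the summed divergence is divided by the batch size. A quick manual check, reusing m, input and target from above:
# Manual check: sum of target * (log(target) - log_probs), divided by the batch size
log_probs = m(input)
manual_kl = (target * (target.log() - log_probs)).sum() / input.size(0)
print("Manual KL divergence:", manual_kl.item())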
CrossEntropyLoss¶
Classification
Combines LogSoftmax and NLLLoss (Negative Log Likelihood Loss) to measure the difference between predicted class scores (logits) and true labels.
# Create a CrossEntropyLoss object
loss = nn.CrossEntropyLoss(reduction = "mean", label_smoothing = 0.0) # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[1.0, 2.0, 3.0], [1.0, 2.0, 1.0]], requires_grad = True) # torch.randn(2, 3)
target = torch.tensor([2, 1])
# Calculating the Loss Function
output = loss(input, target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.47952529788017273
Gradients on input data: tensor([[ 0.0450, 0.1224, -0.1674], [ 0.1060, -0.2119, 0.1060]])
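With integer class targets and no label smoothing, the loss is the mean negative log-softmax score of the true class. A quick manual check, reusing the tensors above:
# Manual check: negative log-probability of the target class, averaged over the batch
log_probs = nn.LogSoftmax(dim = 1)(input)
manual_ce = -log_probs[torch.arange(len(target)), target].mean()
print("Manual cross entropy:", manual_ce.item())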
NLLLoss¶
Classification
Measures the difference between the predicted class log-probabilities (obtained by applying LogSoftmax) and the true labels.
# Create a LogSoftmax object
m = nn.LogSoftmax(dim = 1)
# Create an NLLLoss object
loss = nn.NLLLoss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[1.0, 2.0, 3.0], [1.0, 2.0, 1.0]], requires_grad = True) # torch.randn(2, 3)
target = torch.tensor([2, 1])
# Calculating the Loss Function
output = loss(m(input), target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.47952529788017273
Gradients on input data: tensor([[ 0.0450, 0.1224, -0.1674], [ 0.1060, -0.2119, 0.1060]])
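Since CrossEntropyLoss is exactly LogSoftmax followed by NLLLoss, the value matches the previous section. A quick manual check using gather, reusing m, input and target from above:
# Manual check: pick the negative log-probability of the target class for each sample
log_probs = m(input)
manual_nll = -log_probs.gather(1, target.unsqueeze(1)).mean()
print("Manual NLL:", manual_nll.item())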
BCELoss (Binary Cross Entropy Loss)¶
Binary Classification
Measures the difference between predicted probabilities and true binary labels using a cross-entropy loss function.
# Create a BCELoss object
loss = nn.BCELoss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[0.8], [0.2], [0.6]], requires_grad = True)
target = torch.tensor([[1.0], [0.0], [1.0]])
# Calculating the Loss Function
output = loss(input, target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.3190375566482544
Gradients on input data: tensor([[-0.4167], [ 0.4167], [-0.5556]])
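A quick manual check against the binary cross-entropy formula, reusing the tensors above:
# Manual check: -(target * log(p) + (1 - target) * log(1 - p)), averaged over all elements
manual_bce = -(target * input.log() + (1 - target) * (1 - input).log()).mean()
print("Manual BCE:", manual_bce.item())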
BCEWithLogitsLoss¶
Binary Classification
Measures the difference between predicted logits and true binary labels, combining sigmoid and binary cross-entropy calculations.
# Create a BCEWithLogitsLoss object
loss = nn.BCEWithLogitsLoss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input = torch.tensor([[1.5], [-1.5], [0.0]], requires_grad = True)
target = torch.tensor([[1.0], [0.0], [1.0]])
# Calculating the Loss Function
output = loss(input, target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.36532461643218994
Gradients on input data: tensor([[-0.0608], [ 0.0608], [-0.1667]])
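Because the sigmoid is applied internally, applying plain BCELoss to sigmoid(input) should reproduce the same value (up to numerical precision). A quick check, reusing the tensors above:
# Manual check: sigmoid followed by plain BCELoss
manual_bce_logits = nn.BCELoss(reduction = "mean")(torch.sigmoid(input), target)
print("Manual BCE with logits:", manual_bce_logits.item())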
MarginRankingLoss¶
Ranking
Measures a margin-based ranking loss between pairs of inputs, where the target label indicates which input of each pair should be ranked higher.
# Create a MarginRankingLoss object
loss = nn.MarginRankingLoss(reduction = "mean") # none | mean | sum
# Generate input data and target labels
input1 = torch.tensor([0.8, 0.7, -0.2], requires_grad = True)
input2 = torch.tensor([0.1, -0.1, 0.5], requires_grad = True)
target = torch.tensor([1, 2, 2])
# Calculating the Loss Function
output = loss(input1, input2, target)
print("Loss function value:", output.item())
# Backpropagation
output.backward()
print("Gradients on input data:", input.grad)
Loss function value: 0.46666666865348816 Gradients on input data: tensor([[-0.0608], [ 0.0608], [-0.1667]])
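With the default margin of 0, the loss is mean(max(0, -target * (input1 - input2))). A quick manual check, reusing the tensors above:
# Manual check: hinge on the pairwise difference, scaled by the target sign
manual_ranking = torch.clamp(-target * (input1 - input2), min = 0).mean()
print("Manual margin ranking loss:", manual_ranking.item())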