Version of Python¶

In [1]:
!python -V
Python 3.12.6
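
If the version check needs to live inside a script rather than a shell cell, sys.version_info exposes the same information programmatically. A minimal sketch (the 3.12 floor here is just an illustrative threshold, not a requirement stated above):

In [ ]:
import sys

# Fail early if the interpreter is older than the version this notebook was run with
assert sys.version_info >= (3, 12), f"Python 3.12+ expected, got {sys.version.split()[0]}"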

Import Required Packages¶

In [2]:
# Suppress warnings
import warnings
for warn in [UserWarning, FutureWarning]:
    warnings.filterwarnings("ignore", category = warn)

import os
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
import jupyterlab as jlab

from dataclasses import dataclass

Versions of Required Libraries¶

In [3]:
packages = [
    "Torch", "NumPy", "Pandas", "JupyterLab",
]

package_objects = [
    torch, np, pd, jlab
]

versions = [obj.__version__ for obj in package_objects]

pkgs = {"Package": packages, "Version": versions}
df_pkgs = pd.DataFrame(data = pkgs)
df_pkgs.index.name = "#"
df_pkgs.index += 1

display(df_pkgs)

path_to_reqs = "."
reqs_name = "requirements.txt"

def get_packages_and_versions():
    """Generate strings with libraries and their versions in the format: package==version"""
    
    for package, version in zip(packages, versions):
        yield f"{package.lower()}=={version}\n"

with open(os.path.join(path_to_reqs, reqs_name), "w", encoding = "utf-8") as f:
    f.writelines(get_packages_and_versions())
#    Package     Version
1    Torch       2.2.2
2    NumPy       1.26.4
3    Pandas      2.2.3
4    JupyterLab  4.2.5
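
The pinned versions written to requirements.txt can be fed straight back to pip to recreate the environment. A usage sketch, assuming pip is available on the PATH (not part of the original pipeline):

In [ ]:
# Recreate the environment from the file generated above
!pip install -r requirements.txt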

Example of a Deep Neural Network¶

In [4]:
# eq = False keeps the inherited __hash__: the dataclass-generated __eq__
# would otherwise set __hash__ to None, and nn.Module requires its modules
# to be hashable (e.g. in named_modules)
@dataclass(eq = False)
class DNN(nn.Module):
    input_size: int # Input layer size
    hidden_sizes: list[int] # List of hidden layer sizes
    output_size: int # Output layer size

    # Called automatically right after the dataclass-generated __init__
    def __post_init__(self):
        super().__init__()

        # Create a list to store all layers
        self.layers = nn.ModuleList()

        # Adding the first hidden layer
        self.layers.append(nn.Linear(self.input_size, self.hidden_sizes[0]))

        # Adding the remaining hidden layers
        for i in range(1, len(self.hidden_sizes)):
            self.layers.append(nn.Linear(self.hidden_sizes[i - 1], self.hidden_sizes[i]))
        
        # Adding an output layer
        self.layers.append(nn.Linear(self.hidden_sizes[-1], self.output_size))

        # Activation function for the hidden layers
        self.g = nn.ReLU()

    # Forward pass through the network
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pass through all hidden layers with activation function
        for layer in self.layers[:-1]:
            x = self.g(layer(x))

        # Pass through the output layer
        x = self.layers[-1](x)

        return x

# Check for GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create a model instance
model = DNN(input_size = 10, hidden_sizes = [20, 30, 20], output_size = 5).to(device)

# Create a random input batch (named to avoid shadowing the built-in input)
inputs = torch.randn(2, model.input_size).to(device)

# Run through the model
output = model(inputs)

# Output of the resulting tensor after passing through the model
print("Resulting tensor after passing through the model:")
print(output)

# Output the size of the resulting tensor
print("Size of the resulting tensor:")
print(output.size())
Resulting tensor after passing through the model:
tensor([[-0.0697, -0.0728, -0.1142, -0.0601, -0.0298],
        [-0.0132, -0.1072, -0.0698, -0.0640, -0.0348]],
       grad_fn=<AddmmBackward0>)
Size of the resulting tensor:
torch.Size([2, 5])
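
As a follow-up, a minimal training-step sketch shows how a model like this would typically be fitted. Everything in it is a placeholder assumption rather than part of the example above: random inputs and targets, MSE loss, Adam with lr = 1e-3, and three steps; it only illustrates the mechanics of the optimization loop.

In [ ]:
# Minimal training-loop sketch with hypothetical random data
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)

x = torch.randn(16, model.input_size).to(device)   # dummy input batch
y = torch.randn(16, model.output_size).to(device)  # dummy regression targets

for step in range(3):
    optimizer.zero_grad()                # reset accumulated gradients
    loss = criterion(model(x), y)        # forward pass + loss
    loss.backward()                      # backpropagation
    optimizer.step()                     # weight update
    print(f"Step {step}: loss = {loss.item():.4f}")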