|
| 1 | +import torch |
| 2 | +import torch.nn as nn |
| 3 | +import torch.optim as optim |
| 4 | +from torch.utils.data import DataLoader, TensorDataset |
| 5 | +import numpy as np |
| 6 | + |
class Autoencoder(nn.Module):
    """Symmetric fully-connected autoencoder.

    Compresses ``input_dim``-dimensional vectors down to ``encoding_dim``
    through the hidden widths in ``hidden_dims``, then reconstructs them
    with a mirrored decoder. The decoder ends in a sigmoid, so
    reconstructions lie in [0, 1] — scale inputs to that range.

    Args:
        input_dim: Size of each input vector (e.g. 784 for flat MNIST).
        encoding_dim: Size of the bottleneck representation.
        hidden_dims: Widths of the intermediate hidden layers; the default
            ``(128, 64)`` reproduces the original fixed architecture.
    """

    def __init__(self, input_dim: int, encoding_dim: int,
                 hidden_dims: tuple[int, ...] = (128, 64)) -> None:
        super().__init__()

        # Encoder: input_dim -> *hidden_dims -> encoding_dim, ReLU after
        # every linear layer (including the bottleneck, as in the original).
        encoder_layers: list[nn.Module] = []
        prev = input_dim
        for width in (*hidden_dims, encoding_dim):
            encoder_layers += [nn.Linear(prev, width), nn.ReLU()]
            prev = width
        self.encoder = nn.Sequential(*encoder_layers)

        # Decoder mirrors the encoder; final sigmoid keeps outputs in [0, 1].
        decoder_layers: list[nn.Module] = []
        for width in reversed(hidden_dims):
            decoder_layers += [nn.Linear(prev, width), nn.ReLU()]
            prev = width
        decoder_layers += [nn.Linear(prev, input_dim), nn.Sigmoid()]
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode then decode ``x``; returns the reconstruction."""
        encoded = self.encoder(x)
        return self.decoder(encoded)
| 36 | + |
# --- Hyperparameters ---
input_dim = 784      # flattened 28x28 image, e.g. MNIST
encoding_dim = 32    # bottleneck size
learning_rate = 0.001
num_epochs = 20
batch_size = 64

# --- Example data (swap in a real dataset here) ---
# Uniform random values in [0, 1) stand in for normalized image vectors:
# 1000 samples of input_dim features each.
data = np.random.rand(1000, input_dim)
dataset = TensorDataset(torch.from_numpy(data).float())
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
| 49 | + |
# --- Model, objective, optimizer ---
model = Autoencoder(input_dim, encoding_dim)
criterion = nn.MSELoss()  # per-element reconstruction error
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
| 54 | + |
# --- Training loop ---
model.train()  # make sure training-mode layers (dropout/BN, if added) behave correctly
for epoch in range(num_epochs):
    epoch_loss = 0.0
    num_batches = 0
    for (inputs,) in dataloader:
        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        outputs = model(inputs)
        # Autoencoder objective: reconstruct the input itself.
        loss = criterion(outputs, inputs)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        num_batches += 1

    # Report the MEAN loss over the whole epoch; the original printed only
    # the last batch's loss, which is a noisy, misleading summary.
    mean_loss = epoch_loss / max(num_batches, 1)
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {mean_loss:.4f}')

print("Training complete!")
0 commit comments