import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
latent_dim = 100
hidden_dim = 256
image_dim = 28 * 28 # MNIST
batch_size = 128
epochs = 50
lr = 0.0002
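# Note: GAN setups such as DCGAN typically pair lr = 0.0002 with Adam beta1 = 0.5;
# the optimizers below use PyTorch's default betas of (0.9, 0.999).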
# MNIST Dataset
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
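# Normalize((0.5,), (0.5,)) maps MNIST pixels from [0, 1] to [-1, 1],
# matching the Tanh output range of the generator defined below.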
train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, transform=transform, download=True
)
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True
)
# Generator network
class Generator(nn.Module):
    def __init__(self, latent_dim, hidden_dim, image_dim):
        super(Generator, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(latent_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.ReLU(),
            nn.Linear(hidden_dim * 2, image_dim),
            nn.Tanh()
        )

    def forward(self, x):
        return self.net(x)
# Discriminator network
class Discriminator(nn.Module):
    def __init__(self, image_dim, hidden_dim):
        super(Discriminator, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(image_dim, hidden_dim * 2),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.net(x)
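# LeakyReLU(0.2) keeps a small gradient for negative activations, a common choice
# in GAN discriminators; the final Sigmoid outputs the probability that the input is real.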
# Initialize networks
G = Generator(latent_dim, hidden_dim, image_dim).to(device)
D = Discriminator(image_dim, hidden_dim).to(device)
# Loss and optimizers
criterion = nn.BCELoss()
optimizer_G = optim.Adam(G.parameters(), lr=lr)
optimizer_D = optim.Adam(D.parameters(), lr=lr)
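# BCELoss expects probabilities in [0, 1], which D's final Sigmoid provides.
# G and D get separate optimizers so their parameters can be updated alternately.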
# Training statistics
losses_G = []
losses_D = []
img_list = []
# Training loop
for epoch in range(epochs):
    for i, (real_images, _) in enumerate(train_loader):
        batch_size = real_images.size(0)
        # Adversarial ground truths
        real_labels = torch.ones(batch_size, 1).to(device)
        fake_labels = torch.zeros(batch_size, 1).to(device)
        # Flatten real images into vectors
        real_images = real_images.view(batch_size, -1).to(device)
        # ========================
        # Train Discriminator (D)
        # ========================
        D.zero_grad()
        # Real images loss
        outputs_real = D(real_images)
        loss_real = criterion(outputs_real, real_labels)
        # Fake images loss
        z = torch.randn(batch_size, latent_dim).to(device)
        fake_images = G(z)
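        # detach() blocks gradients from flowing back into G here, so the backward
        # pass below only updates the discriminator's parameters.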
        outputs_fake = D(fake_images.detach())
        loss_fake = criterion(outputs_fake, fake_labels)
        # Total discriminator loss
        loss_D = loss_real + loss_fake
        loss_D.backward()
        optimizer_D.step()
        # ========================
        # Train Generator (G)
        # ========================
        G.zero_grad()
        # Generator tries to fool the discriminator
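        # Using real_labels here gives the non-saturating generator loss:
        # G is updated to maximize log D(G(z)) rather than minimize log(1 - D(G(z))).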
        outputs = D(fake_images)
        loss_G = criterion(outputs, real_labels)
        loss_G.backward()
        optimizer_G.step()
        # Save losses
        if i % 200 == 0:
            losses_G.append(loss_G.item())
            losses_D.append(loss_D.item())
            print(
                f"Epoch [{epoch}/{epochs}] Batch {i}/{len(train_loader)} "
                f"Loss D: {loss_D.item():.4f} Loss G: {loss_G.item():.4f}"
            )
    # Save generated images at the end of each epoch
    with torch.no_grad():
        test_z = torch.randn(16, latent_dim).to(device)
        generated = G(test_z).view(-1, 1, 28, 28).cpu()
        img_list.append(generated)
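# img_list accumulates one grid of 16 generated samples per epoch for visualization below.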
# Plot training losses
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss")
plt.plot(losses_G, label="G")
plt.plot(losses_D, label="D")
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
# Visualize generated images
def show_images(images, epoch):
    fig, axes = plt.subplots(4, 4, figsize=(8, 8))
    for i, ax in enumerate(axes.flat):
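        # Undo the (0.5, 0.5) normalization so pixel values are back in [0, 1] for display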
        ax.imshow(images[i][0].detach().numpy() * 0.5 + 0.5, cmap='gray')
        ax.axis('off')
    plt.suptitle(f"Epoch {epoch}")
    plt.show()
# Show progression over epochs
for epoch in [0, 10, 20, 30, 40, 49]:
    show_images(img_list[epoch], epoch)