Problem with quantum GAN

I have a problem running the quantum GAN demo with my own dataset, which consists of 20 RGB images (resized to 64x64), so the input is 20 (number of images) x 64 (height) x 64 (width) x 3 (RGB). The error occurs when I launch the training cell: RecursionError: maximum recursion depth exceeded (stack overflow). I tried raising the recursion limit but it still does not work. I think the problem is in the line that instantiates the discriminator (discriminator = Discriminator().to(device)). Can someone help me?

import math
import random
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import sklearn
from sklearn.preprocessing import MinMaxScaler
import pennylane as qml

seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)

# This part reads the dataset, resizes the images to 64x64 and stores them in an array.
# 'class DigitsDataset(Dataset)' from the demo is replaced by the following code.
# I also tried with a reduced dataset of 20 images instead of 200.
ImgLocation = "C:/Users/elyon/OneDrive/Desktop/Tesi/dataset/"
CATEGORIES = set(["Dilbert", "Boss"])
ImagePaths = []
for category in CATEGORIES:
    for image in list(os.listdir(ImgLocation + category)):
        ImagePaths = ImagePaths + [ImgLocation + category + "/" + image]
data_lowres = []
for img in ImagePaths:
    image = cv2.imread(img)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_lowres = cv2.resize(image, (64, 64))
    data_lowres.append(image_lowres)
data_lowres = np.array(data_lowres, dtype="float32") / 255.0
data_mng = data_lowres.reshape(data_lowres.shape[0], 64, 64, 3)
# The output shape is: 20 (number of images), 64, 64, 3 (RGB)
# Dataset loader
image_size = 8  # Height / width of the square images
batch_size = 1  # I also tried changing the batch size
dataloader = torch.utils.data.DataLoader(
    data_mng, batch_size=batch_size, shuffle=True, drop_last=True
)
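# Shape check (an editor's sketch, not part of the original post): each batch
# from this loader has shape (batch_size, 64, 64, 3), i.e. 64 * 64 * 3 = 12288
# values per image, while the discriminator below expects only
# image_size * image_size = 64 input features per sample.
sample = next(iter(dataloader))
print(sample.shape)  # torch.Size([1, 64, 64, 3])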
# Discriminator (the same as in the demo)
class Discriminator(nn.Module):
    """Fully connected classical discriminator"""

    def __init__(self):
        super().__init__()

        self.model = nn.Sequential(
            self.double(),
            # Inputs to first hidden layer (num_input_features -> 64)
            nn.Linear(image_size * image_size, 64),
            nn.ReLU(),
            # First hidden layer (64 -> 16)
            nn.Linear(64, 16),
            nn.ReLU(),
            # Second hidden layer (16 -> output)
            nn.Linear(16, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.model(x)
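# Editor's note on the RecursionError (a sketch, assuming the demo's layer
# sizes): self.double() returns the module itself, so
# nn.Sequential(self.double(), ...) registers the Discriminator as one of its
# own submodules. Discriminator().to(device) then recurses through this cyclic
# module tree until the limit is hit, regardless of sys.setrecursionlimit. The
# hypothetical FixedDiscriminator below simply drops self.double() from the
# Sequential; call .double() on the instance afterwards if double precision is needed.
class FixedDiscriminator(nn.Module):
    """Fully connected classical discriminator without the self-reference."""

    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(image_size * image_size, 64),
            nn.ReLU(),
            nn.Linear(64, 16),
            nn.ReLU(),
            nn.Linear(16, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.model(x)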
# Quantum simulator (the same as in the demo)
n_qubits = 5  # Total number of qubits / N
n_a_qubits = 1  # Number of ancillary qubits / N_A
q_depth = 6  # Depth of the parameterised quantum circuit / D
n_generators = 4  # Number of subgenerators for the patch method / N_G
dev = qml.device("lightning.qubit", wires=n_qubits)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

@qml.qnode(dev, interface="torch", diff_method="parameter-shift")
def quantum_circuit(noise, weights):
    weights = weights.reshape(q_depth, n_qubits)

    # Initialise latent vectors
    for i in range(n_qubits):
        qml.RY(noise[i], wires=i)

    # Repeated layer
    for i in range(q_depth):
        # Parameterised layer
        for y in range(n_qubits):
            qml.RY(weights[i][y], wires=y)

        # Control Z gates
        for y in range(n_qubits - 1):
            qml.CZ(wires=[y, y + 1])

    return qml.probs(wires=list(range(n_qubits)))
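# Circuit inspection (an editor's sketch, not in the demo): PennyLane's
# qml.draw can be used to confirm the RY / CZ layer structure before training.
print(qml.draw(quantum_circuit)(torch.rand(n_qubits), torch.rand(q_depth * n_qubits)))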
def partial_measure(noise, weights):
    # Non-linear Transform
    probs = quantum_circuit(noise, weights)
    probsgiven0 = probs[: (2 ** (n_qubits - n_a_qubits))]
    probsgiven0 /= torch.sum(probs)

    # Post-Processing
    probsgiven = probsgiven0 / torch.max(probsgiven0)
    return probsgiven
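# Output-size check (an editor's sketch): with n_qubits = 5 and n_a_qubits = 1,
# each call to partial_measure returns 2 ** (5 - 1) = 16 values, one patch of
# the generated image.
_noise = torch.rand(n_qubits) * math.pi / 2
_weights = torch.rand(q_depth * n_qubits)
print(partial_measure(_noise, _weights).shape)  # torch.Size([16])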
# Generator (the same as in the demo)
class PatchQuantumGenerator(nn.Module):
    """Quantum generator class for the patch method"""

    def __init__(self, n_generators, q_delta=1):
        """
        Args:
            n_generators (int): Number of sub-generators to be used in the patch method.
            q_delta (float, optional): Spread of the random distribution for parameter initialisation.
        """

        super().__init__()

        self.q_params = nn.ParameterList(
            [
                nn.Parameter(q_delta * torch.rand(q_depth * n_qubits), requires_grad=True)
                for _ in range(n_generators)
            ]
        )
        self.n_generators = n_generators

    def forward(self, x):
        # Size of each sub-generator output
        patch_size = 2 ** (n_qubits - n_a_qubits)

        # Create a Tensor to 'catch' a batch of images from the for loop. x.size(0) is the batch size.
        images = torch.Tensor(x.size(0), 0).to(device)

        # Iterate over all sub-generators
        for params in self.q_params:

            # Create a Tensor to 'catch' a batch of the patches from a single sub-generator
            patches = torch.Tensor(0, patch_size).to(device)
            for elem in x:
                q_out = partial_measure(elem, params).float().unsqueeze(0)
                patches = torch.cat((patches, q_out))

            # Each batch of patches is concatenated with each other to create a batch of images
            images = torch.cat((images, patches), 1)

        return images
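# Width check (an editor's sketch): the patch generator emits
# n_generators * patch_size = 4 * 16 = 64 values per image, i.e. a flat 8x8
# single-channel image, whereas the real data loaded above is 64x64x3. The
# discriminator therefore sees real and fake tensors of different sizes.
_gen = PatchQuantumGenerator(n_generators).to(device)
_z = torch.rand(1, n_qubits, device=device) * math.pi / 2
print(_gen(_z).shape)  # torch.Size([1, 64])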
lrG = 0.3 # Learning rate for the generator
lrD = 0.01 # Learning rate for the discriminator
num_iter = 500 # Number of training iterations

# Training -> when I run this cell I get the error
# I replaced -1 with 3 because the images are RGB (I also tried with -1 and got the same error)
discriminator = Discriminator().to(device)
generator = PatchQuantumGenerator(n_generators).to(device)
criterion = nn.BCELoss()
optD = optim.SGD(discriminator.parameters(), lr=lrD)
optG = optim.SGD(generator.parameters(), lr=lrG)

real_labels = torch.full((batch_size,), 1.0, dtype=torch.float, device=device)
fake_labels = torch.full((batch_size,), 0.0, dtype=torch.float, device=device)
fixed_noise = torch.rand(8, n_qubits, device=device) * math.pi / 2
counter = 0
results = []
while True:
    # for i, (data, _) in enumerate(dataloader):
    for data in dataloader:

        # Data for training the discriminator
        data = data.reshape(3, image_size * image_size)
        real_data = data.to(device)

        # Noise following a uniform distribution in range [0, pi/2)
        noise = torch.rand(batch_size, n_qubits, device=device) * math.pi / 2
        fake_data = generator(noise)

        # Training the discriminator
        discriminator.zero_grad()
        outD_real = discriminator(real_data).view(3)
        outD_fake = discriminator(fake_data.detach()).view(3)

        errD_real = criterion(outD_real, real_labels)
        errD_fake = criterion(outD_fake, fake_labels)
        # Propagate gradients
        errD_real.backward()
        errD_fake.backward()

        errD = errD_real + errD_fake
        optD.step()

        # Training the generator
        generator.zero_grad()
        outD_fake = discriminator(fake_data).view(3)
        errG = criterion(outD_fake, real_labels)
        errG.backward()
        optG.step()

        counter += 1

        # Show loss values
        if counter % 10 == 0:
            print(f'Iteration: {counter}, Discriminator Loss: {errD:0.3f}, Generator Loss: {errG:0.3f}')
            test_images = generator(fixed_noise).view(8, 1, image_size, image_size).cpu().detach()

            # Save images every 50 iterations
            if counter % 50 == 0:
                results.append(test_images)

        if counter == num_iter:
            break
    if counter == num_iter:
        break
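# Shape note (an editor's check, not a confirmed fix): each batch holds
# 1 * 64 * 64 * 3 = 12288 values, while data.reshape(3, image_size * image_size)
# asks for only 3 * 64 = 192, so this reshape raises a RuntimeError once the
# RecursionError is resolved. A minimal way to see the mismatch:
_batch = next(iter(dataloader))
print(_batch.numel(), "values per batch vs", 3 * image_size * image_size, "requested")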

# Plot images
fig = plt.figure(figsize=(10, 5))
outer = gridspec.GridSpec(5, 2, wspace=0.1)

for i, images in enumerate(results):
    inner = gridspec.GridSpecFromSubplotSpec(1, images.size(0), subplot_spec=outer[i])
    images = torch.squeeze(images, dim=1)
    for j, im in enumerate(images):
        ax = plt.Subplot(fig, inner[j])
        ax.imshow(im.numpy(), cmap="gray")
        ax.set_xticks([])
        ax.set_yticks([])
        if j == 0:
            ax.set_title(f'Iteration {50+i*50}', loc='left')
        fig.add_subplot(ax)
plt.show()

Hi @Eleonora_Panini,

It’s best if we continue the conversation on the other thread. :slight_smile:

Ok, but someone from the community banned my other post, which is now hidden. Why?

Hi @Eleonora_Panini,

I have removed the ban from your other post so we can continue the conversation there.