This is the full code, I tried to reduce the dataset from 200 images RGB (100 for each classes) to 20 and the output have the same problem when I launch the training cell (RecursionError: maximum recursion depth exceeded while calling a Python object). I provide you the part of code that I modified in order to adapt the demo to my code/dataset: look at the respective comments on the code below. Do you have any idea about how to solve the problem?
I appreciate your help, and thank you in advance.
import math
import random
import numpy as np
#import pandas as pd
import cv2
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
#import pennylane as qml
import sys
import os
Pytorch imports
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import sklearn
from sklearn.preprocessing import MinMaxScaler
#%%
import pennylane as qml
#%%
Set the random seed for reproducibility
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
THIS PART OF TEXT READ THE DATASET, RESIZE IMAGES 64X64 AND STORE IN ARRAY
#‘class DigitsDataset(Dataset)’ OF THE DEMO IS REPLACED BY THE FOLLOWING CODE:
ImgLocation=“C:/Users/elyon/OneDrive/Desktop/Tesi/dataset/”
List image categories we are interested in
CATEGORIES = set([“Dilbert”,“Boss”])
Create a list to store image paths
ImagePaths=
for category in CATEGORIES:
for image in list(os.listdir(ImgLocation+category)):
ImagePaths=ImagePaths+[ImgLocation+category+“/”+image]
Load images and resize to 200x200
data_lowres=
for img in ImagePaths:
image = cv2.imread(img)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_lowres = cv2.resize(image, (64, 64))
data_lowres.append(image_lowres)
Convert image data to numpy array and standardize values (divide by 255 since RGB values ranges from 0 to 255)
data_lowres = np.array(data_lowres, dtype=“float32”) / 255.0
data_mng=data_lowres.reshape(data_lowres.shape[0], 64, 64, 3)
Show data shape
print("Shape of data_lowres: ", data_lowres.shape)
print("Shape of the scaled array: ", data_mng.shape)
#%%
#data = data_lowres.astype(np.float32).reshape(64, 64)
**** LOADER DATASET
#%%
image_size = 8 # Height / width of the square images
batch_size = 1
dataloader = torch.utils.data.DataLoader(
data_mng, batch_size=batch_size, shuffle=True, drop_last=True
)
DISCRIMINATOR (the same of demo)
class Discriminator(nn.Module):
“”“Fully connected classical discriminator”“”
def __init__(self):
super().__init__()
self.model = nn.Sequential(
self.double() ,
# Inputs to first hidden layer (num_input_features -> 64)
nn.Linear(image_size * image_size, 64),
nn.ReLU(),
# First hidden layer (64 -> 16)
nn.Linear(64, 16),
nn.ReLU(),
# Second hidden layer (16 -> output)
nn.Linear(16, 1),
nn.Sigmoid(),
)
def forward(self, x):
return self.model(x)
#%%
quantum simulator is the same of demo
Quantum variables
n_qubits = 5 # Total number of qubits / N
n_a_qubits = 1 # Number of ancillary qubits / N_A
q_depth = 6 # Depth of the parameterised quantum circuit / D
n_generators = 4 # Number of subgenerators for the patch method / N_G
Quantum simulator
dev = qml.device(“lightning.qubit”, wires=n_qubits)
Enable CUDA device if available
device = torch.device(“cuda:0” if torch.cuda.is_available() else “cpu”)
#%%
@qml.qnode(dev, interface=“torch”, diff_method=“parameter-shift”)
def quantum_circuit(noise, weights):
weights = weights.reshape(q_depth, n_qubits)
# Initialise latent vectors
for i in range(n_qubits):
qml.RY(noise[i], wires=i)
# Repeated layer
for i in range(q_depth):
# Parameterised layer
for y in range(n_qubits):
qml.RY(weights[i][y], wires=y)
# Control Z gates
for y in range(n_qubits - 1):
qml.CZ(wires=[y, y + 1])
return qml.probs(wires=list(range(n_qubits)))
def partial_measure(noise, weights):
# Non-linear Transform
probs = quantum_circuit(noise, weights)
probsgiven0 = probs[: (2 ** (n_qubits - n_a_qubits))]
probsgiven0 /= torch.sum(probs)
# Post-Processing
probsgiven = probsgiven0 / torch.max(probsgiven0)
return probsgiven
#%%
GENERATOR the same of demo
class PatchQuantumGenerator(nn.Module):
“”“Quantum generator class for the patch method”“”
def __init__(self, n_generators, q_delta=1):
"""
Args:
n_generators (int): Number of sub-generators to be used in the patch method.
q_delta (float, optional): Spread of the random distribution for parameter initialisation.
"""
super().__init__()
self.q_params = nn.ParameterList(
[
nn.Parameter(q_delta * torch.rand(q_depth * n_qubits), requires_grad=True)
for _ in range(n_generators)
]
)
self.n_generators = n_generators
def forward(self, x):
# Size of each sub-generator output
patch_size = 2 ** (n_qubits - n_a_qubits)
# Create a Tensor to 'catch' a batch of images from the for loop. x.size(0) is the batch size.
images = torch.Tensor(x.size(0), 0).to(device)
# Iterate over all sub-generators
for params in self.q_params:
# Create a Tensor to 'catch' a batch of the patches from a single sub-generator
patches = torch.Tensor(0, patch_size).to(device)
for elem in x:
q_out = partial_measure(elem, params).float().unsqueeze(0)
patches = torch.cat((patches, q_out))
# Each batch of patches is concatenated with each other to create a batch of images
images = torch.cat((images, patches), 1)
return images
#%%
lrG = 0.3 # Learning rate for the generator
lrD = 0.01 # Learning rate for the discriminator
num_iter = 500 # Number of training iterations
#sys.setrecursionlimit(10000)
print(sys.getrecursionlimit())
#%%
TRAINING->when I run this cell I get the error
#I replaced -1 with 3 because images are RGB colour
discriminator = Discriminator().to(device)
generator = PatchQuantumGenerator(n_generators).to(device)
Binary cross entropy
criterion = nn.BCELoss()
Optimisers
optD = optim.SGD(discriminator.parameters(), lr=lrD)
optG = optim.SGD(generator.parameters(), lr=lrG)
real_labels = torch.full((batch_size,), 1.0, dtype=torch.float, device=device)
fake_labels = torch.full((batch_size,), 0.0, dtype=torch.float, device=device)
Fixed noise allows us to visually track the generated images throughout training
fixed_noise = torch.rand(8, n_qubits, device=device) * math.pi / 2
Iteration counter
counter = 0
Collect images for plotting later
results =
while True:
#for i (data, _) in enumerate(dataloader):
for data in (dataloader):
# Data for training the discriminator
data = data.reshape(3, image_size * image_size)
real_data = data.to(device)
# Noise follwing a uniform distribution in range [0,pi/2)
noise = torch.rand(batch_size, n_qubits, device=device) * math.pi / 2
fake_data = generator(noise)
# Training the discriminator
discriminator.zero_grad()
outD_real = discriminator(real_data).view(3)
outD_fake = discriminator(fake_data.detach()).view(3)
errD_real = criterion(outD_real, real_labels)
errD_fake = criterion(outD_fake, fake_labels)
# Propagate gradients
errD_real.backward()
errD_fake.backward()
errD = errD_real + errD_fake
optD.step()
# Training the generator
generator.zero_grad()
outD_fake = discriminator(fake_data).view(3)
errG = criterion(outD_fake, real_labels)
errG.backward()
optG.step()
counter += 1
# Show loss values
if counter % 10 == 0:
print(f'Iteration: {counter}, Discriminator Loss: {errD:0.3f}, Generator Loss: {errG:0.3f}')
test_images = generator(fixed_noise).view(8,1,image_size,image_size).cpu().detach()
# Save images every 50 iterations
if counter % 50 == 0:
results.append(test_images)
if counter == num_iter:
break
if counter == num_iter:
break
#%%
PLOT IMAGES
‘’’
fig = plt.figure(figsize=(10, 5))
outer = gridspec.GridSpec(5, 2, wspace=0.1)
for i, images in enumerate(results):
inner = gridspec.GridSpecFromSubplotSpec(1, images.size(0),
subplot_spec=outer[i])
images = torch.squeeze(images, dim=1)
for j, im in enumerate(images):
ax = plt.Subplot(fig, inner[j])
ax.imshow(im.numpy(), cmap="gray")
ax.set_xticks([])
ax.set_yticks([])
if j==0:
ax.set_title(f'Iteration {50+i*50}', loc='left')
fig.add_subplot(ax)
plt.show()
‘’’