Hi Ivana, thanks for the welcome. Sorry about the messy code; I wasn't sure how best to format things for this forum. I've tidied it up a bit and removed some things, but there isn't a whole lot more I can remove, as the problem is quite specific:
```python
"""
PennyLane QCNN example on MNIST.
Trying to compare performance vs Qiskit (the Qiskit implementation is in a separate script).
"""
# Necessary imports
import numpy as np
import matplotlib.pyplot as plt
from torch import Tensor
from torch.nn import Linear, CrossEntropyLoss, MSELoss
from torch.optim import LBFGS
import torch
from torchsummary import summary
from torch import cat, no_grad, manual_seed
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
import torch.optim as optim
from torch.nn import (
    Module,
    Conv2d,
    Linear,
    Dropout2d,
    NLLLoss,
    MaxPool2d,
    Flatten,
    Sequential,
    ReLU,
)
import torch.nn.functional as F
import pennylane as qml
from pennylane.templates import AngleEmbedding, BasicEntanglerLayers
# Train Dataset
# -------------
manual_seed(42)
batch_size = 10
n_samples = 500
X_train = datasets.MNIST(
    root="./data", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
# Keep only the first n_samples examples of classes 0 and 1
idx = np.append(
    np.where(X_train.targets == 0)[0][:n_samples], np.where(X_train.targets == 1)[0][:n_samples]
)
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]
train_loader = DataLoader(X_train, batch_size=batch_size, shuffle=True)
# Test Dataset
# -------------
n_samples = 250
X_test = datasets.MNIST(
    root="./data", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
idx = np.append(
    np.where(X_test.targets == 0)[0][:n_samples], np.where(X_test.targets == 1)[0][:n_samples]
)
X_test.data = X_test.data[idx]
X_test.targets = X_test.targets[idx]
test_loader = DataLoader(X_test, batch_size=batch_size, shuffle=True)
n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

# Simple parameterized circuit (note: `inputs` is currently unused, so the
# quantum output does not depend on the classical features fed in)
@qml.qnode(dev)
def qnode(inputs, weights):
    for i in range(n_qubits):
        qml.RX(weights[i], wires=i)
        qml.RX(weights[i + 2], wires=i)
    qml.CNOT(wires=[0, 1])
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

n_layers = 1
n_params = 4
weight_shapes = {"weights": (n_params,)}
qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)
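
# (Optional sanity check, a minimal sketch: qml.draw renders the circuit for
# dummy arguments so the gate layout can be verified by eye.)
# print(qml.draw(qnode)(np.zeros(n_qubits), np.zeros(n_params)))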
# Define torch NN module
class Net(Module):
    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(1, 2, kernel_size=5)
        self.conv2 = Conv2d(2, 16, kernel_size=5)
        self.dropout = Dropout2d()
        self.fc1 = Linear(256, 64)
        self.fc2 = Linear(64, 2)
        self.qlayer_1 = qml.qnn.TorchLayer(qnode, weight_shapes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        quantum_output = self.qlayer_1(x)
        # Concatenate the two classical logits with the two quantum
        # expectation values, giving four outputs per sample
        concatenated_output = torch.cat((x, quantum_output), dim=1)
        return concatenated_output

model4 = Net()
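
# (Sketch of a quick shape check with a dummy batch: the conv/pool stack on a
# 28x28 input gives 16 * 4 * 4 = 256 flattened features, and the concatenated
# output should be 4-dimensional.)
# with torch.no_grad():
#     print(model4(torch.rand(1, 1, 28, 28)).shape)  # expected: torch.Size([1, 4])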
# Define optimizer and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = CrossEntropyLoss()
# Start training
epochs = 10
loss_list = []
model4.train()
for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad(set_to_none=True)
        output = model4(data)
        loss = loss_func(output, target)
        loss.backward()
        optimizer.step()
        total_loss.append(loss.item())
    loss_list.append(sum(total_loss) / len(total_loss))
    print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
```
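
In case it's relevant for the comparison, evaluation is just the standard PyTorch accuracy loop over `test_loader`; a minimal sketch of what I run (assuming the definitions above) is:

```python
# Minimal sketch: accuracy on the test set defined above
model4.eval()
correct = 0
with torch.no_grad():
    for data, target in test_loader:
        pred = model4(data).argmax(dim=1)  # index of the largest of the 4 outputs
        correct += (pred == target).sum().item()
print("Test accuracy: {:.2f}%".format(100.0 * correct / len(test_loader.dataset)))
```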