Hello and thanks for your response. I am trying to understand how to automatically determine the shape of the weights parameter of the TorchLayer class:
# This notebook shows how to allocate the weights parameter of qml.qnn.TorchLayer.
# Failing to do so correctly may throw the notorious ValueError: Must specify a shape for every non-input parameter in the QNode.
%reset -f
import numpy as np
import pennylane as qml
import torch
import sklearn.datasets
import sklearn.metrics
from pennylane.ops.qubit import CNOT
import matplotlib.pyplot as plt
from qugel.qgates import *
n_qubits = 2
# Generate random input features for the quantum circuit
inp_arr = torch.tensor(np.pi * np.random.randn(n_qubits))
n_layers = 2
w_dim = 3
dev4 = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev4, interface='torch')
def qnode001(inputs, weights):
    # print("Shapes: inputs={}, params_arr={}, q_bits={}, q_depth={}".format(inputs.shape, weights.shape, n_qubits, n_layers))
    # pad_with=4 pads the 2 input features with the constant value 4 up to 2**n_qubits = 4 amplitudes, then normalizes
    qml.templates.AmplitudeEmbedding(inputs, wires=range(n_qubits), normalize=True, pad_with=4)
    qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits), ranges=None, imprimitive=CNOT)
    return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
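As a sanity check (this call is my addition, mirroring the draw_mpl call further down), the QNode runs fine when handed an explicitly shaped weights tensor:
# Sanity check: call the QNode directly with a (n_layers, n_qubits, 3) weights tensor
test_weights = torch.zeros(n_layers, n_qubits, w_dim)
print(qnode001(inp_arr, test_weights))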
# @qml.qnode(dev4)
# def CONVCircuit(inputs, weights):
#     """
#     Args:
#         inputs: the image to encode.
#         params: phi_pqc_array, the parameters of the PQC
#         q_bits: number of qubits
#         q_depth: number of layers in the quantum circuit
#     """
#     wires = n_qubits
#     num_rep = int(inputs.shape[0] / n_qubits)
#     # print("Shapes: inputs={}, params_arr={}, q_bits={}, q_depth={}".format(inputs.shape, weights.shape, n_qubits, n_layers))
#     # qml.AmplitudeEmbedding(features=inputs, wires=range(0, q_bits), normalize=True, pad_with=params_arr.flatten().shape[0])
#     Q_encoding_block(inputs, n_qubits)
#     # Q_quanvol_block_A(weights, n_qubits, n_layers)
#     exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(n_qubits)]
#     # print("Patch:{}, Measure:{}, Reps:{}".format(inputs.shape, len(exp_vals), num_rep))
#     return exp_vals
class QNN(torch.nn.Module):
    def __init__(self, circ):
        super(QNN, self).__init__()
        # NOTE: the trailing 3 here is what makes the error go away (see my question below)
        weight_shapes = {"weights": (1, n_qubits, 3)}
        # print(weight_shapes.get("weights"))
        self.pqc = circ
        self.qlayer = qml.qnn.TorchLayer(self.pqc, weight_shapes)
        # Draw the quantum circuit
        fig, ax = qml.draw_mpl(self.pqc, expansion_strategy='device')(inp_arr, torch.zeros(n_layers, n_qubits, w_dim))
        plt.show()
        self.clayer2 = torch.nn.Linear(2, 2)

    def forward(self, x):
        # print(x.shape)
        output = self.qlayer(x)
        output = self.clayer2(output)
        return output
def print_network(model):
    """Print out the network information."""
    num_params = 0
    for p in model.parameters():
        num_params += p.numel()
    # print(model)
    print("The number of parameters: {}".format(num_params))
def Q_count_parameters(qnn):
    # print(dict(qnn.named_parameters()))
    for name, param in qnn.named_parameters():
        param.requires_grad = True  # make sure every parameter is trainable
        print(name, param.data)
    print(qnn)
    return sum(p.numel() for p in qnn.parameters() if p.requires_grad)
model = QNN(qnode001)
Q_count_parameters(model)
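A quick smoke test (my addition; the dummy batch is hypothetical) to confirm the shapes line up before training:
# Smoke test: a dummy batch of 4 samples with n_qubits features each;
# the hybrid model should return a (4, 2) tensor.
print(model(torch.rand(4, n_qubits)).shape)  # expected: torch.Size([4, 2])
# Expected parameter count (my arithmetic): 1*2*3 quantum weights + 2*2+2 linear = 12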
# Data
samples = 500
x, y = sklearn.datasets.make_moons(samples)
y_hot = np.zeros((samples, 2))
y_hot[np.arange(samples), y] = 1
X = torch.tensor(x).float()
Y = torch.tensor(y_hot).float()
# Validation data
val_samples = 100
val_x, val_y = sklearn.datasets.make_moons(val_samples)
val_y_hot = np.zeros((val_samples, 2))
val_y_hot[np.arange(val_samples), val_y] = 1
val_X = torch.tensor(val_x).float()
val_Y = torch.tensor(val_y_hot).float()
# Optimizer and loss function
# opt = torch.optim.Adam(model.parameters(), lr=0.3)
opt = torch.optim.Adagrad(model.parameters(), lr=0.3)
loss = torch.nn.L1Loss()
# Training parameters
epochs = 15
batch_size = 64
batches = samples // batch_size
# Data loader
data_loader = torch.utils.data.DataLoader(list(zip(X, Y)), batch_size=batch_size, shuffle=True, drop_last=True)
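(My addition, for completeness: a peek at one batch to show what the TorchLayer actually receives as inputs:)
x0, y0 = next(iter(data_loader))
print(x0.shape, y0.shape)  # torch.Size([64, 2]) torch.Size([64, 2])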
# Lists for storing losses and accuracies
train_losses = []
val_losses = []
train_accuracies = []
val_accuracies = []
from tqdm import tqdm
for epoch in tqdm(range(epochs)):
    running_loss = 0
    correct = 0
    total = 0
    for x, y in data_loader:
        opt.zero_grad()
        outputs = model(x)
        loss_evaluated = loss(outputs, y)
        loss_evaluated.backward()
        opt.step()
        running_loss += loss_evaluated.item()
        _, predicted = torch.max(outputs.data, 1)
        total += y.size(0)
        correct += (predicted == y.argmax(dim=1)).sum().item()
    avg_loss = running_loss / batches
    accuracy = 100 * correct / total
    # Validation (no gradients needed here)
    with torch.no_grad():
        val_outputs = model(val_X)
        val_outputs = val_outputs.view(val_samples, -1)  # Reshape val_outputs
        val_loss = loss(val_outputs, val_Y)
    val_predicted = torch.max(val_outputs.data, 1)[1]
    val_accuracy = 100 * (val_predicted == val_Y.argmax(dim=1)).sum().item() / val_samples
    # Store losses and accuracies
    train_losses.append(avg_loss)
    val_losses.append(val_loss.item())
    train_accuracies.append(accuracy)
    val_accuracies.append(val_accuracy)
    print("Epoch {}: Loss = {:.4f}, Accuracy = {:.2f}%, Val Loss = {:.4f}, Val Accuracy = {:.2f}%".format(
        epoch + 1, avg_loss, accuracy, val_loss.item(), val_accuracy))
# Calculate confusion matrix
val_predicted_labels = val_predicted.numpy()
val_true_labels = val_Y.argmax(dim=1).numpy()
confusion_matrix = sklearn.metrics.confusion_matrix(val_true_labels, val_predicted_labels, normalize='true')
# Plot confusion matrix
plt.figure(figsize=(6, 4))
plt.imshow(confusion_matrix, cmap='Blues')
plt.title('Confusion Matrix')
plt.colorbar()
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.xticks([0, 1])
plt.yticks([0, 1])
# Display percentages inside the matrix
thresh = confusion_matrix.max() / 2
for i in range(2):
    for j in range(2):
        plt.text(j, i, f'{confusion_matrix[i, j]*100:.2f}%', ha="center", va="center",
                 color="white" if confusion_matrix[i, j] > thresh else "black")
plt.show()
# Plotting the losses and accuracies
epochs_range = range(1, epochs + 1)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_losses, label='Training Loss')
plt.plot(epochs_range, val_losses, label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_accuracies, label='Training Accuracy')
plt.plot(epochs_range, val_accuracies, label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy (%)')
plt.legend()
plt.tight_layout()
plt.show()
This is my output:
(1, 2, 2)
qlayer.weights tensor([[[6.0415, 0.5459],
[2.0931, 4.1954]]])
clayer2.weight tensor([[ 0.7059, -0.5791],
[-0.2732, 0.4173]])
clayer2.bias tensor([-0.5260, 0.1105])
QNN(
(qlayer): <Quantum Torch Layer: func=qnode001>
(clayer2): Linear(in_features=2, out_features=2, bias=True)
)
Shapes: inputs=torch.Size([2]), params_arr=torch.Size([1, 2, 2]), q_bits=2, q_depth=2
But there is an error:
ValueError: Weights tensor must have third dimension of length 3; got 2
I know where it happens:
weight_shapes = {"weights": (1, n_qubits, n_layers)}
If I change n_layers to 3, the error disappears. Where did the number 3 come from? How could I have known that in advance?
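Is the intended way to determine this in advance something like the following? (my guess, unverified):
# My guess (unverified): ask the template itself for its expected weight shape
shape = qml.StronglyEntanglingLayers.shape(n_layers=n_layers, n_wires=n_qubits)
print(shape)  # (2, 2, 3)?
weight_shapes = {"weights": shape}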
The correctly running notebook is here: Qugel/qugel_007_simple_qnn.ipynb at master · BoltzmannEntropy/Qugel · GitHub
Thanks,