Hello,
I’m experiencing issues when changing the batch_size to something greater than one while using the PennyLane PyTorch interface.
I keep getting the error:

RuntimeError: shape '[10, -1]' is invalid for input of size 1

But I have checked that the QNode is receiving input of shape [batch_size, n_qubits] ([10, 2] in my case).
Is this an issue with the current version of PennyLane?
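(For reference, this is how I’m printing my environment info; I can paste the output here if that helps:)

import pennylane as qml
import torch

qml.about()  # prints the PennyLane version, installed devices/plugins, and Python info
print(torch.__version__)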
Here’s some of my code. Any help would be greatly appreciated:
# Imports used below (collected from the names in my code)
import numpy as np
import timeit

import torch
import torch.nn.functional as F
from torch import manual_seed, optim
from torch.nn import Module, Conv2d, Dropout2d, Linear, CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Train Dataset
# -------------

# Set train shuffle seed (for reproducibility)
manual_seed(42)

batch_size = 10
n_samples = 500  # We will concentrate on the first 500 samples of each class
# Use pre-defined torchvision function to load MNIST train data
X_train = datasets.MNIST(
    root="./data", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])
)

# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
    np.where(X_train.targets == 0)[0][:n_samples], np.where(X_train.targets == 1)[0][:n_samples]
)
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]

# Define torch dataloader with filtered data
train_loader = DataLoader(X_train, batch_size=batch_size, shuffle=True)
# Test Dataset
# ------------

# Set test shuffle seed (for reproducibility)
manual_seed(5)

n_samples = 250  # was 50

# Use pre-defined torchvision function to load MNIST test data
X_test = datasets.MNIST(
    root="./data", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()])
)

# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
    np.where(X_test.targets == 0)[0][:n_samples], np.where(X_test.targets == 1)[0][:n_samples]
)
X_test.data = X_test.data[idx]
X_test.targets = X_test.targets[idx]

# Define torch dataloader with filtered data
test_loader = DataLoader(X_test, batch_size=batch_size, shuffle=True)
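(To confirm the loaders are batching as expected, here is a quick shape check; the expected shapes are based on standard MNIST, so treat them as my assumption:)

# Sanity check on one batch (assumption: standard MNIST image sizes)
data, target = next(iter(train_loader))
print(data.shape, target.shape)  # I expect torch.Size([10, 1, 28, 28]) and torch.Size([10])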
import pennylane as qml
from pennylane.templates import AngleEmbedding, BasicEntanglerLayers
import torch

n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

# Simple circuit based off PennyLane example
@qml.qnode(dev)
def qnode(inputs, weights):
    # Feature map (manually defined ZZFeatureMap-like circuit)
    for i in range(n_qubits):
        qml.RX(weights[i], wires=i)
        qml.RX(weights[i + 2], wires=i)
    qml.CNOT(wires=[0, 1])
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

n_layers = 1  # Set the number of ansatz layers as needed
n_params = 4
weight_shapes = {"weights": (n_params,)}  # Single parameter tuple
print(weight_shapes)

qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)

# Visualize the circuit
print(qml.draw(qlayer, expansion_strategy="device")(torch.tensor([0.1, 0.2])))
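One thing I noticed while writing this up: the circuit above never actually uses the `inputs` argument, only `weights`. Below is a rough sketch of what I think an input-embedding version would look like, using the AngleEmbedding I already import (this is a guess at the intended pattern, not my working code):

# Sketch (my assumption): embed the batched inputs so the circuit depends on them
@qml.qnode(dev)
def qnode_embedded(inputs, weights):
    AngleEmbedding(inputs, wires=range(n_qubits))  # encode each 2-dim input row as rotation angles
    for i in range(n_qubits):
        qml.RX(weights[i], wires=i)
        qml.RX(weights[i + 2], wires=i)
    qml.CNOT(wires=[0, 1])
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

Is something like this the right approach, or should the batching work even without embedding the inputs?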
# Define torch NN module
class Net(Module):
    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(1, 2, kernel_size=5)
        self.conv2 = Conv2d(2, 16, kernel_size=5)
        self.dropout = Dropout2d()
        self.fc1 = Linear(256, 64)
        self.fc2 = Linear(64, 2)  # 2-dimensional input to QNN
        self.qlayer_1 = qml.qnn.TorchLayer(qnode, weight_shapes)
        # No need for self.fc3 in this context

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # print("Shape of x:", x.shape)  # Debugging line
        quantum_output = self.qlayer_1(x)
        # print("Shape of quantum_output:", quantum_output.shape)  # Debugging line
        # Concatenate quantum_output with the original x
        concatenated_output = torch.cat((x, quantum_output), dim=1)
        return concatenated_output
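To spell out the shapes I expect through forward (these are my own calculations, so corrections welcome):

# Expected shapes through forward for a [10, 1, 28, 28] batch (my own arithmetic):
#   conv1 -> [10, 2, 24, 24], pool -> [10, 2, 12, 12]
#   conv2 -> [10, 16, 8, 8],  pool -> [10, 16, 4, 4]
#   flatten -> [10, 256], fc1 -> [10, 64], fc2 -> [10, 2]
#   qlayer_1 -> should be [10, 2], concatenated output -> [10, 4]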
model4 = Net()
start = timeit.default_timer()

# Define optimizer and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = CrossEntropyLoss()  # Note: may need to switch back to a binary loss

# Start training
epochs = 10  # Set number of epochs
loss_list = []  # Store loss history
model4.train()  # Set model to training mode

for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad(set_to_none=True)  # Reset gradients
        output = model4(data)  # Forward pass
        loss = loss_func(output, target)  # Calculate loss (targets here are binary 0/1)
        loss.backward()  # Backward pass
        optimizer.step()  # Optimize weights
        total_loss.append(loss.item())  # Store loss
    loss_list.append(sum(total_loss) / len(total_loss))
    print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))

# Stop timer
stop = timeit.default_timer()
print("Time: ", stop - start)
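For completeness, here is a stripped-down snippet that I believe reproduces the error without the full training loop (the random dummy batch is my stand-in for a real MNIST batch):

# Minimal reproduction attempt (assumption: a random batch shaped like MNIST behaves the same)
dummy = torch.rand(batch_size, 1, 28, 28)
out = model4(dummy)  # I believe this forward pass raises the same RuntimeError
print(out.shape)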