import pennylane as qml
import torch
from torch import nn, optim
from torch.nn import Conv2d, Linear
import torch.nn.functional as F

n_qubits = 3
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def qnode(inputs, weights):
    wt = 0
    # Input encoding: one RZ rotation per wire
    for x in range(n_qubits):
        qml.RZ(inputs[x], wires=x)
    # Trainable layer: RY and RX rotations per wire
    for x in range(n_qubits):
        qml.RY(weights[x + wt], wires=x)
        qml.RX(weights[x + wt + 1], wires=x)
        wt = wt + 1
    return qml.probs(wires=[0, 1, 2])

weight_shapes = {"weights": n_qubits * 2}
init_method = {"weights": torch.nn.init.normal_}
class HybridModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(3, 16, kernel_size=5)
        self.conv2 = Conv2d(16, 32, kernel_size=5)
        self.conv3 = Conv2d(32, 32, kernel_size=3)
        self.fc1 = Linear(288, 6)
        self.qlayer_1 = qml.qnn.TorchLayer(qnode, weight_shapes, init_method=init_method)
        self.qlayer_2 = qml.qnn.TorchLayer(qnode, weight_shapes, init_method=init_method)
        self.fc2 = Linear(16, 10)  # 16 = two 8-dimensional probability vectors from the QNNs

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv3(x))
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        y = torch.split(x, n_qubits, dim=1)  # split the 6 features into two groups of n_qubits
        qnn_output = []
        qnn_out = self.qlayer_1(y[0])  # apply QNN
        qnn_output.append(qnn_out)
        qnn_out = self.qlayer_2(y[1])  # apply QNN
        # Concatenate QNN results along the feature dimension
        x = torch.cat(qnn_output, dim=1)
        x = self.fc2(x)
        return x
model = HybridModel().to(device)

# Training
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
loss_func = nn.CrossEntropyLoss()

# Start training
epochs = 5       # Set number of epochs
loss_list = []   # Store loss history
model.train()    # Set model to training mode

for epoch in range(epochs):
    total_loss = []
    model.train()  # Set model to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad(set_to_none=True)
        output = model(data)               # Forward pass
        loss = loss_func(output, target)   # Calculate loss
        loss.backward()                    # Backward pass
        optimizer.step()                   # Optimize weights
        total_loss.append(loss.item())
    avg_loss = sum(total_loss) / len(total_loss)
    loss_list.append(avg_loss)
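The device and train_loader used above are not shown in the snippet. For context, a minimal sketch of that setup, assuming torchvision's CIFAR-10 with the batch size of 100 mentioned below (the data root and normalization values are placeholders):

# Sketch of the data-loading setup assumed above (not part of the original snippet)
import torchvision
import torchvision.transforms as transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_set = torchvision.datasets.CIFAR10(root="./data", train=True,
                                         download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=100, shuffle=True)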
When the above code is used to train a hybrid classical-quantum neural network on the CIFAR dataset with a batch size of 100, the following error message appears:
<ipython-input-18-fd134f2e7d01> in <cell line: 17>()
27 optimizer.zero_grad(set_to_none=True)
28
---> 29 output = model(data) # Forward pass
30
31 loss = loss_func(output, target) # Calculate loss
5 frames
/usr/local/lib/python3.10/dist-packages/pennylane/qnn/torch.py in forward(self, inputs)
411 # reshape to the correct number of batch dims
412 if has_batch_dim:
--> 413 results = torch.reshape(results, (*batch_dims, *results.shape[1:]))
414
415 return results
RuntimeError: shape '[100, 8]' is invalid for input of size 24