Hi. I am trying to implement a quantum-classical hybrid neural network, but training is very slow. When I try to use the GPU, I run into a problem. Is there a way to run the training on the GPU?
import os

import pennylane as qml
import torch
import torch.nn as nn
import torch.nn.functional as F

# training hyperparameters
batch_size = 32
epochs = 30
lr = 1e-3
w_decay = 1e-4

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda")

# quantum device: CPU state-vector simulator
n_qubits = 16
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, w):
    # angle-encode the 16 pooled pixel values, one per qubit
    for i in range(n_qubits):
        qml.RX(inputs[0:batch_size, i], wires=i)
    # single trainable rotation on qubit 0
    qml.Rot(*w, wires=0)
    # entangle neighbouring qubits in a chain
    for i in range(n_qubits - 1):
        qml.CNOT(wires=[i, i + 1])
    # one Pauli-Z expectation value per qubit
    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
weight_shapes = {"w": 3}
class MNIST(nn.Module):
    def __init__(self):
        super().__init__()
        # input: 1 x 28 x 28 -> average-pool down to 4 x 4 = 16 values
        self.pool = nn.AvgPool2d(7, 7)
        # quantum layer wrapping the qnode above (16 inputs -> 16 expectation values)
        self.qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)
        # classical read-out layer
        self.fc1 = nn.Linear(16, 10)

    def forward(self, x):
        bsz = x.shape[0]
        x = self.pool(x).view(bsz, 16)
        out = self.qlayer(x)
        out = self.fc1(out)
        return F.log_softmax(out, dim=1)
model = MNIST().to(device)

# quick forward-pass check with a dummy batch on the GPU
a = torch.rand((1, 1, 28, 28)).to(device)
b = model(a)
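For context, the rest of the script is just a standard MNIST training loop built on the hyperparameters defined at the top; it looks roughly like this (paraphrased, and the dataset/DataLoader details below are placeholders rather than my exact code):

from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# placeholder data loading (paraphrased)
train_set = datasets.MNIST("./data", train=True, download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)

optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=w_decay)

for epoch in range(epochs):
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)  # model returns log-probabilities
        loss.backward()
        optimizer.step()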
But when I run the forward pass above (b = model(a)), I get the following error:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0, cpu!
Can anyone help me with this error?
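My guess is that default.qubit simulates the circuit on the CPU while the inputs and classical layers live on the GPU. Would switching to a GPU-backed simulator be the right direction? Something like the sketch below (this assumes the pennylane-lightning-gpu plugin is installed; I have not verified that this is the recommended setup):

# sketch only: swap the CPU simulator for PennyLane's GPU state-vector simulator
dev = qml.device("lightning.gpu", wires=n_qubits)

@qml.qnode(dev, interface="torch")
def qnode(inputs, w):
    ...  # same circuit as above

Or is the intended approach to keep the quantum layer on the CPU and only move the classical parts to the GPU?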