Hello.
It seems that CUDA support was enhanced in PennyLane v0.19.0.
So I tried default.qubit with diff_method='backprop' on a GPU, but I got an error.
Is there anything that can be done on the user-code side?
This is the error message:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument tensors in method wrapper___cat)
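Judging from the method name in the trace (wrapper___cat), the failure seems to happen inside a torch.cat call that mixes CPU and CUDA tensors. The same message can be reproduced in plain PyTorch, for example (this snippet is just my own minimal repro, not part of the trace):

import torch

a = torch.zeros(2)                 # CPU tensor
b = torch.zeros(2, device='cuda')  # GPU tensor
torch.cat([a, b])                  # raises: Expected all tensors to be on the same device ...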
Here is the code I tried:
import pennylane as qml
import pytorch_lightning as pl
import torch
from torch import nn

class QNet(nn.Module):
    """Hybrid layer wrapping a PennyLane QNode in a torch.nn.Module."""

    def __init__(self, n_input: int, q_dev: str, q_diff: str, n_layers: int) -> None:
        super().__init__()
        self.n_wires = n_input
        self.n_layers = n_layers
        dev = qml.device(q_dev, wires=self.n_wires)
        self.qnode = qml.QNode(self._circuit, dev, diff_method=q_diff)
        weight_shapes = {
            'params': qml.StronglyEntanglingLayers.shape(self.n_layers, self.n_wires)
        }
        self.model = qml.qnn.TorchLayer(self.qnode, weight_shapes)

    def forward(self, x):
        return self.model(x)

    def _circuit(self, inputs, params):
        # Encode the inputs, apply the variational layers, measure one expectation value.
        qml.AngleEmbedding(inputs, wires=range(self.n_wires))
        qml.StronglyEntanglingLayers(params, wires=range(self.n_wires))
        return qml.expval(qml.PauliZ(0))

def main():
    n_features = 20
    batch_size = 16
    q_dev = 'default.qubit'
    q_diff = 'backprop'  # => error
    # q_diff = 'adjoint'  # => ok
    n_layers = 6
    model = QNet(
        n_features,
        q_dev,
        q_diff,
        n_layers
    )
    pl.seed_everything(42)
    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device_name)
    x = torch.randn((batch_size, n_features), device=device)
    assert x.device.type != 'cpu'
    model = model.to(device)  # moves the TorchLayer weights to the GPU
    predicted = model(x)  # <= error occurred here
    assert predicted.device.type != 'cpu'


if __name__ == '__main__':
    main()
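For now, the only thing I have found that avoids the error while keeping diff_method='backprop' is to leave the quantum layer on the CPU and move tensors across devices by hand, which of course gives up GPU execution of the circuit. A rough sketch of what I mean (just my own workaround attempt, reusing QNet from above; the function name is mine):

def main_cpu_quantum():
    # Workaround sketch: keep the QNet/TorchLayer weights on the CPU, feed the
    # quantum layer CPU tensors, and move the result back to the GPU afterwards.
    # Gradients still flow through .cpu()/.to(), but the circuit runs on the CPU.
    n_features, batch_size, n_layers = 20, 16, 6
    model = QNet(n_features, 'default.qubit', 'backprop', n_layers)  # not moved to CUDA
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.randn((batch_size, n_features), device=device)  # classical data on the GPU
    predicted = model(x.cpu()).to(device)  # shuttle tensors around the quantum layer
    return predicted

Is there a better way that keeps the circuit evaluation itself on the GPU?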