When running this line:

model_hybrid = train_model(
    model_hybrid, criterion, optimizer_hybrid, exp_lr_scheduler, num_epochs=num_epochs
)
I get:
Training started:
C:\Users\risto\anaconda3\lib\site-packages\torch\nn\functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at …\c10/core/TensorImpl.h:1156.)
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
RuntimeError Traceback (most recent call last)
<ipython-input> in <module>
----> 1 model_hybrid = train_model(
2 model_hybrid, criterion, optimizer_hybrid, exp_lr_scheduler, num_epochs=num_epochs
3 )
<ipython-input> in train_model(model, criterion, optimizer, scheduler, num_epochs)
37 loss = criterion(outputs, labels)
38 if phase == "train":
---> 39 loss.backward()
40 optimizer.step()
41
~\anaconda3\lib\site-packages\torch\_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
253 create_graph=create_graph,
254 inputs=inputs)
--> 255 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
256
257 def register_hook(self, hook):
~\anaconda3\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
145 retain_graph = create_graph
146
--> 147 Variable._execution_engine.run_backward(
148 tensors, grad_tensors, retain_graph, create_graph, inputs,
149 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
~\anaconda3\lib\site-packages\torch\autograd\function.py in apply(self, *args)
85 def apply(self, *args):
86 # _forward_cls is defined by derived class
---> 87 return self._forward_cls.backward(self, *args)  # type: ignore[attr-defined]
88
89
~\anaconda3\lib\site-packages\pennylane\interfaces\torch.py in backward(ctx, dy)
173 """Implements the backwards pass QNode vector-Jacobian product"""
174 ctx.dy = dy
--> 175 vjp = dy.view(1, -1) @ ctx.jacobian.apply(ctx, *ctx.saved_tensors)
176 vjp = torch.unbind(vjp.view(-1))
177 return (None,) + tuple(vjp)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking arugment for argument mat2 in method wrapper_mm)
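As far as I can tell, the failure happens in the matrix product at line 175 above: dy is on the GPU (cuda:0), while the Jacobian returned by ctx.jacobian.apply(...) apparently stays on the CPU (it is mat2 in the failing mm call). A minimal, illustrative reproduction of the same error class is below; the names and shapes are made up for illustration and are not taken from my actual code (it requires a machine with a CUDA device):

import torch

dy = torch.randn(1, 4, device="cuda:0")  # gradient vector on the GPU, like dy in the traceback
jac = torch.randn(4, 4)                  # second operand left on the CPU, like the saved Jacobian
vjp = dy @ jac                           # RuntimeError: Expected all tensors to be on the same device ...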