Hello Staff,
I would like to use my own datasets with ’ Turning quantum nodes into Torch Layers’. This is being done with ‘Quantum transfer learning’ in the order: resnet, quantum_net, New quantum_net layers.
# New Code
clayer_1 = torch.nn.Linear(2, 2)
clayer_2 = torch.nn.Linear(2, 2)
softmax = torch.nn.Softmax(dim=1)
# New Code
class DressedQuantumNet(nn.Module):
"""
Torch module implementing the *dressed* quantum net.
"""
def __init__(self):
"""
Definition of the *dressed* layout.
"""
super().__init__()
self.pre_net = nn.Linear(512, n_qubits)
self.q_params = nn.Parameter(q_delta * torch.randn(q_depth * n_qubits))
self.post_net = nn.Linear(n_qubits, 2)
# New Code
self.clayer_1 = torch.nn.Linear(2, 4)
self.qlayer_1 = qml.qnn.TorchLayer(qnode=quantum_net, weight_shapes={"weights": (q_depth, n_qubits)})
self.qlayer_2 = qml.qnn.TorchLayer(qnode=quantum_net, weight_shapes={"weights": (q_depth, n_qubits)})
self.clayer_2 = torch.nn.Linear(4, 2)
self.softmax = torch.nn.Softmax(dim=1)
# New Code```
def forward(self, input_features):
"""
Defining how tensors are supposed to move through the *dressed* quantum
net.
"""
# obtain the input features for the quantum circuit
# by reducing the feature dimension from 512 to 4
pre_out = self.pre_net(input_features)
q_in = torch.tanh(pre_out) * np.pi / 2.0
# Apply the quantum circuit to each element of the batch and append to q_out
q_out = torch.Tensor(0, n_qubits)
q_out = q_out.to(device)
for elem in q_in:
q_out_elem = torch.hstack(quantum_net(elem, self.q_params)).float().unsqueeze(0)
q_out = torch.cat((q_out, q_out_elem))
# return the two-dimensional prediction from the postprocessing layer
return self.post_net(q_out)
# New Code
x = self.clayer_1(x)
x_1, x_2 = torch.split(x, 2, dim=1)
x_1 = self.qlayer_1(x_1)
x_2 = self.qlayer_2(x_2)
x = torch.cat([x_1, x_2], axis=1)
x = self.clayer_2(x)
return self.softmax(x)
# New Code
What would be needed to make this a functional model:
/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
warnings.warn(
/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.
warnings.warn(msg)
Downloading: "https://download.pytorch.org/models/resnet18-f37072fd.pth" to /root/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth
100%|██████████| 44.7M/44.7M [00:00<00:00, 53.7MB/s]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-f1b163e1658e> in <cell line: 8>()
6
7 # Notice that model_hybrid.fc is the last layer of ResNet18
----> 8 model_hybrid.fc = DressedQuantumNet()
9
10 # Use CUDA or CPU according to the "device" object.
2 frames
/usr/local/lib/python3.10/dist-packages/pennylane/qnn/torch.py in _signature_validation(self, qnode, weight_shapes)
362
363 if self.input_arg not in sig:
--> 364 raise TypeError(
365 f"QNode must include an argument with name {self.input_arg} for inputting data"
366 )
TypeError: QNode must include an argument with name inputs for inputting data
'Turning quantum nodes into Torch Layers | PennyLane Demos
'Quantum transfer learning | PennyLane Demos