Pass batches with @qml.batch_params but not as a decorator

Hello,

I am working on building a nn.Module class that has a circuit, and that uses qml.qnn.TorchLayer.
For training, I would like to pass batches of images through this layer. This is my code:

class MyNet(nn.Module):
    """Hybrid torch module wrapping a PennyLane circuit as a TorchLayer.

    NOTE(review): this is the questioner's original (non-working) version,
    kept to illustrate the reported error. The ``@qml.batch_params``
    decorator on ``circuit`` is what triggers
    ``TypeError: QNode must include an argument with name inputs`` —
    presumably because the transform changes how the QNode's arguments are
    seen by ``TorchLayer``; see the answer below for the fix.
    """

    def __init__(self, n_qubits, n_layers=1, device='lightning.qubit', rot_axis='X'):
        super().__init__()
        self.n_qubits = n_qubits
        self.rot_axis = rot_axis
        self.device = device
        # Quantum device plus a torch-interfaced QNode built from ``circuit``.
        self.dev = qml.device(self.device, wires=self.n_qubits)
        self.qnode = qml.QNode(self.circuit, self.dev, interface='torch')
        self.weight_shapes = {"weights": qml.StronglyEntanglingLayers.shape(n_layers=n_layers, n_wires=self.n_qubits)}
        self.qlayer = qml.qnn.TorchLayer(self.qnode, self.weight_shapes)

    @qml.batch_params
    def circuit(self, inputs, weights):
        # Embed features as single-qubit rotation angles, entangle, and
        # return one Pauli-Z expectation value per qubit.
        qml.AngleEmbedding(features=inputs, wires=range(self.n_qubits), rotation=self.rot_axis)
        qml.StronglyEntanglingLayers(weights=weights, wires=range(self.n_qubits), imprimitive=qml.ops.CZ)
        return [qml.expval(qml.PauliZ(i)) for i in range(self.n_qubits)]

    def forward(self, x):
        return self.qlayer(x)


I tried using the @qml.batch_params decorator in my code, but it gives me the error:

TypeError: QNode must include an argument with name inputs for inputting data

How can I make passing batches of data work through this self.qlayer?
Why is the decorator not working?

Thank you.

Hi @vallevaro,

Recently we’ve added NumPy-style broadcasting capabilities to PennyLane, so qml.batch_params is no longer necessary. We’re planning to deprecate that transform soon.

In your case, simply removing the qml.batch_params decorator will make your code work as expected:

class MyNet(nn.Module):
    """Hybrid torch module: angle-embeds inputs, entangles, measures Z.

    Relies on PennyLane's built-in NumPy-style parameter broadcasting, so a
    whole batch of inputs can be passed straight through ``forward`` — the
    ``qml.batch_params`` transform is not needed.
    """

    def __init__(self, n_qubits, n_layers=1, device='lightning.qubit', rot_axis='X'):
        super().__init__()
        self.n_qubits = n_qubits
        self.rot_axis = rot_axis
        self.device = device
        # Quantum device plus a torch-interfaced QNode built from ``circuit``.
        self.dev = qml.device(self.device, wires=self.n_qubits)
        self.qnode = qml.QNode(self.circuit, self.dev, interface='torch')
        # Trainable weight shape for the StronglyEntanglingLayers template.
        self.weight_shapes = {"weights": qml.StronglyEntanglingLayers.shape(n_layers=n_layers, n_wires=self.n_qubits)}
        self.qlayer = qml.qnn.TorchLayer(self.qnode, self.weight_shapes)

    def circuit(self, inputs, weights):
        # Embed features as single-qubit rotation angles, entangle, and
        # return one Pauli-Z expectation value per qubit.
        qml.AngleEmbedding(features=inputs, wires=range(self.n_qubits), rotation=self.rot_axis)
        qml.StronglyEntanglingLayers(weights=weights, wires=range(self.n_qubits), imprimitive=qml.ops.CZ)
        return [qml.expval(qml.PauliZ(i)) for i in range(self.n_qubits)]

    def forward(self, x):
        return self.qlayer(x)


def main():
    """Demo: push a batch of 10 random 2-feature samples through MyNet."""
    qubit_count = 2
    model = MyNet(qubit_count)

    batch = torch.tensor(np.random.uniform(0, 1, (10, qubit_count)))
    print(model(batch))

Output:

tensor([[-0.9911,  0.9202],
        [-0.6943,  0.8678],
        [-0.9854,  0.9979],
        [-0.8679,  0.6809],
        [-0.9817,  0.7578],
        [-0.9891,  0.9288],
        [-0.9823,  0.9442],
        [-0.7939,  0.5708],
        [-0.8812,  0.6121],
        [-0.7665,  0.9585]], dtype=torch.float64, grad_fn=<StackBackward0>)
1 Like

Thank you so much! I don’t know why I didn’t try this out. :smiley:

1 Like