How to add more variables in qnode while turning it to Torch Layer?

Hi, I learned some tips about adding a quantum circuit to a Torch layer.
The example below is taken from the qml.qnn.TorchLayer documentation page.

# Example adapted from the qml.qnn.TorchLayer documentation.
num_qubits = 2
dev = qml.device("default.qubit", wires=num_qubits)

@qml.qnode(dev)
def qnode(inputs, weights_0, weight_1):
    """Encode ``inputs`` with RX rotations, apply trainable rotations, and
    return one Pauli-Z expectation value per wire."""
    # Data-encoding layer: one RX rotation per wire.
    for wire in range(num_qubits):
        qml.RX(inputs[wire], wires=wire)
    # Trainable layer: a general Rot on wire 0 and a single RY on wire 1.
    qml.Rot(*weights_0, wires=0)
    qml.RY(weight_1, wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))

# One entry per trainable QNode argument ("inputs" is excluded automatically).
weight_shapes = {"weights_0": 3, "weight_1": 1}
qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)

I wonder whether I can add the variables into the qnode, such as the “layer_num” below:

# NOTE(review): this is the failing example exactly as posted — see the
# comments below for why it raises.
dev = qml.device("default.qubit", wires = 2)

@qml.qnode(dev)
def qnode(inputs, weights, layer_num):
    
    # Encode the (normalized) 4-element input vector into the 2-qubit state.
    qml.AmplitudeEmbedding(features=inputs, wires=range(2), normalize=True)

    
    # Repeat one RY rotation per wire, layer_num times.
    for ii in range(layer_num):
        qml.RY(weights[ii][0], wires=0)
        qml.RY(weights[ii][1], wires=1)


    # NOTE(review): the device above only has 2 wires, so probs over
    # range(4) requests wires that do not exist — this should be range(2).
    return qml.probs(wires=list(range(4)))


inputs = torch.range(0,3)  # NOTE(review): torch.range is deprecated; prefer torch.arange
layer_num = 10
weight_shape = {"weights": (layer_num,2)}

# Raises "ValueError: Must specify a shape for every non-input parameter in
# the QNode": layer_num is a positional QNode argument, but weight_shape
# has no entry for it. The fix below drops layer_num and infers the layer
# count from len(weights) instead.
qgen = qml.qnn.TorchLayer(qnode, weight_shape)

I got the error message ValueError: Must specify a shape for every non-input parameter in the QNode after running this code. Please help me solve this problem, thank you :face_holding_back_tears: :face_holding_back_tears: :face_holding_back_tears:

Hey @mini!

For your second code example, you don’t need to specify layer_num since it’s tied to the dimension of weights — simply passing in weights does the trick, then you can just loop over len(weights):

dev = qml.device("default.qubit", wires=2)


@qml.qnode(dev)
def qnode(inputs, weights):
    """Amplitude-embed ``inputs`` on 2 wires, apply one RY layer per row of
    ``weights``, and return the full probability distribution."""
    qml.AmplitudeEmbedding(features=inputs, wires=range(2), normalize=True)

    # The layer count is implied by the first dimension of ``weights``, so
    # no separate ``layer_num`` QNode argument (and weight shape) is needed.
    for ii in range(len(weights)):
        qml.RY(weights[ii][0], wires=0)
        qml.RY(weights[ii][1], wires=1)

    # Bug fix: the device only has 2 wires, so measure probabilities over
    # range(2) — the original range(4) requested nonexistent wires.
    return qml.probs(wires=range(2))


# torch.arange replaces the deprecated torch.range; values are [0., 1., 2., 3.].
inputs = torch.arange(0.0, 4.0)
layer_num = 10
weight_shape = {"weights": (layer_num, 2)}

qgen = qml.qnn.TorchLayer(qnode, weight_shape)

Hope this helps! :slight_smile:

Thank you so much!
But I still wonder whether I can pass extra (non-trainable) variables when adding a QNode to a TorchLayer.
For instance, I only want to apply RY gates on specific wires, where the choice of wires depends on a given variable.

Ah! I see. I think this should work — just have to nest things a little :sweat_smile: :

dev = qml.device("default.qubit", wires=2)


def create_qnode(wire):
    """Build a QNode whose circuit applies a Hadamard on ``wire``.

    Closing over ``wire`` lets non-trainable configuration reach the circuit
    without adding a QNode argument (which TorchLayer would otherwise demand
    a weight shape for).
    """

    @qml.qnode(dev)
    def qnode(inputs, weights):
        qml.AmplitudeEmbedding(features=inputs, wires=range(2), normalize=True)
        qml.RX(weights[0], wires=0)
        qml.RX(weights[1], wires=1)
        qml.Hadamard(wires=wire)

        return qml.probs(wires=range(2))

    return qnode


# torch.arange replaces the deprecated torch.range; values are [0., 1., 2., 3.].
inputs = torch.arange(0.0, 4.0)
weight_shape = {"weights": (2,)}
qnode = create_qnode(0)
# Output depends on the random weights, e.g.
# tensor([0.2025, 0.5118, 0.1429, 0.1429], dtype=torch.float64)
print(qnode(inputs, np.random.uniform(0, 1, size=weight_shape["weights"])))
qgen = qml.qnn.TorchLayer(qnode, weight_shape)

Let me know if that does the trick!

Yes! This works! Thank you again :laughing:
I have another question: could I make dev configurable through a variable of my function?
Such as the example code below:

import pennylane as qml
import torch
import numpy as np


def create_qnode(total_qubit, wire):
    """Return a QNode on a freshly created ``total_qubit``-wire device,
    with a Hadamard applied to ``wire``.

    Both the device size and the Hadamard target are captured by closure,
    so the returned QNode keeps the (inputs, weights) signature that
    qml.qnn.TorchLayer expects.
    """

    dev = qml.device("default.qubit", wires=total_qubit)

    @qml.qnode(dev)
    def qnode(inputs, weights):
        qml.AmplitudeEmbedding(features=inputs, wires=range(total_qubit), normalize=True)

        # One trainable RX rotation per wire.
        for q in range(total_qubit):
            qml.RX(weights[q], wires=q)

        qml.Hadamard(wires=wire)
        return qml.probs(wires=range(total_qubit))

    return qnode


# Generalization: derive the input length from the device size (the
# amplitude embedding needs 2**total_qubit features) instead of hard-coding
# 4, and use torch.arange — torch.range is deprecated.
total_qubit = 2
inputs = torch.arange(0.0, float(2**total_qubit))
qnode = create_qnode(total_qubit=total_qubit, wire=0)
weight_shape = {"weights": (total_qubit,)}

print(qnode(inputs, np.random.uniform(0, 1, size=weight_shape["weights"])))
qgen = qml.qnn.TorchLayer(qnode, weight_shape)

I want to define the variable total_qubit by myself.
The code works, but I’m not sure if this is reasonable or not.

Yep! That’s totally reasonable :slight_smile:.

1 Like