QLSTM: list index out of range

Hello, I have two sets of code; the first is from a .py file that contains the important classes.

class QLSTM(nn.Module):
    def __init__(self,
                 input_size,
                 hidden_size,
                 n_qubits=4,
                 n_qlayers=1,
                 n_vrotations=3,
                 batch_first=True,
                 return_sequences=False,
                 return_state=False,
                 noise=nm,
                 backend="qiskit.aer"):
        super(QLSTM, self).__init__()
        self.n_inputs = input_size
        self.hidden_size = hidden_size
        self.concat_size = self.n_inputs + self.hidden_size
        self.n_qubits = n_qubits
        self.n_qlayers = n_qlayers
        self.n_vrotations = n_vrotations
        self.backend = backend  # "default.qubit", "qiskit.basicaer", "qiskit.ibm"

        self.noise = noise

        self.batch_first = batch_first
        self.return_sequences = return_sequences
        self.return_state = return_state

        self.wires_forget = [f"wire_forget_{i}" for i in range(self.n_qubits)]
        self.wires_input = [f"wire_input_{i}" for i in range(self.n_qubits)]
        self.wires_update = [f"wire_update_{i}" for i in range(self.n_qubits)]
        self.wires_output = [f"wire_output_{i}" for i in range(self.n_qubits)]


        self.dev_forget = qml.device(self.backend, wires=self.wires_forget, noise_model=self.noise)
        self.dev_input = qml.device(self.backend, wires=self.wires_input, noise_model=self.noise)
        self.dev_update = qml.device(self.backend, wires=self.wires_update, noise_model=self.noise)
        self.dev_output = qml.device(self.backend, wires=self.wires_output, noise_model=self.noise)
        
        def ansatz(params, wires_type):
            # Entangling layer.
            for i in range(1, 3):
                for j in range(self.n_qubits):
                    if j + i < self.n_qubits:
                        qml.CNOT(wires=[wires_type[j], wires_type[j + i]])
                    else:
                        qml.CNOT(wires=[wires_type[j], wires_type[j + i - self.n_qubits]])

            # Variational layer.
            for i in range(self.n_qubits):
                qml.RX(params[0][i], wires=wires_type[i])
                qml.RY(params[1][i], wires=wires_type[i])
                qml.RZ(params[2][i], wires=wires_type[i])

        def VQC(features, weights, wires_type):
            # Preprocess input data to encode the initial state.
            # qml.templates.AngleEmbedding(features, wires=wires_type)
            ry_params = [torch.arctan(feature) for feature in features]
            rz_params = [torch.arctan(feature ** 2) for feature in features]
            for i in range(self.n_qubits):
                qml.Hadamard(wires=wires_type[i])
                qml.RY(ry_params[i], wires=wires_type[i])
                qml.RZ(ry_params[i], wires=wires_type[i])

            # Variational block.
            qml.layer(ansatz, self.n_qlayers, weights, wires_type=wires_type)

        def _circuit_forget(inputs, weights):
            VQC(inputs, weights, self.wires_forget)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_forget]

        self.qlayer_forget = qml.QNode(_circuit_forget, self.dev_forget, interface="torch")

        def _circuit_input(inputs, weights):
            VQC(inputs, weights, self.wires_input)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_input]

        self.qlayer_input = qml.QNode(_circuit_input, self.dev_input, interface="torch")

        def _circuit_update(inputs, weights):
            VQC(inputs, weights, self.wires_update)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_update]

        self.qlayer_update = qml.QNode(_circuit_update, self.dev_update, interface="torch")

        def _circuit_output(inputs, weights):
            VQC(inputs, weights, self.wires_output)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_output]

        self.qlayer_output = qml.QNode(_circuit_output, self.dev_output, interface="torch")

        weight_shapes = {"weights": (self.n_qlayers, self.n_vrotations, self.n_qubits)}
        print(
            f"weight_shapes = (n_qlayers, n_vrotations, n_qubits) = ({self.n_qlayers}, {self.n_vrotations}, {self.n_qubits})")

        self.clayer_in = torch.nn.Linear(self.concat_size, self.n_qubits)
        self.VQC = {
            'forget': qml.qnn.TorchLayer(self.qlayer_forget, weight_shapes),
            'input': qml.qnn.TorchLayer(self.qlayer_input, weight_shapes),
            'update': qml.qnn.TorchLayer(self.qlayer_update, weight_shapes),
            'output': qml.qnn.TorchLayer(self.qlayer_output, weight_shapes)
        }
        self.clayer_out = torch.nn.Linear(self.n_qubits, self.hidden_size)
        # self.clayer_out = [torch.nn.Linear(n_qubits, self.hidden_size) for _ in range(4)]

    def forward(self, x, init_states=None):
        '''
        x.shape is (batch_size, seq_length, feature_size)
        recurrent_activation -> sigmoid
        activation -> tanh
        '''
        if self.batch_first is True:
            batch_size, seq_length, features_size = x.size()
        else:
            seq_length, batch_size, features_size = x.size()

        hidden_seq = []
        if init_states is None:
            h_t = torch.zeros(batch_size, self.hidden_size)  # hidden state (output)
            c_t = torch.zeros(batch_size, self.hidden_size)  # cell state
        else:
            # for now we ignore the fact that in PyTorch you can stack multiple RNNs
            # so we take only the first elements of the init_states tuple init_states[0][0], init_states[1][0]
            h_t, c_t = init_states
            h_t = h_t[0]
            c_t = c_t[0]

        for t in range(seq_length):
            # get features from the t-th element in seq, for all entries in the batch
            x_t = x[:, t, :]

            # Concatenate input and hidden state
            v_t = torch.cat((h_t, x_t), dim=1)

            # match qubit dimension
            y_t = self.clayer_in(v_t)

            f_t = torch.sigmoid(self.clayer_out(self.VQC['forget'](y_t)))  # forget block
            i_t = torch.sigmoid(self.clayer_out(self.VQC['input'](y_t)))  # input block
            g_t = torch.tanh(self.clayer_out(self.VQC['update'](y_t)))  # update block
            o_t = torch.sigmoid(self.clayer_out(self.VQC['output'](y_t)))  # output block

            c_t = (f_t * c_t) + (i_t * g_t)
            h_t = o_t * torch.tanh(c_t)

            hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        return hidden_seq, (h_t, c_t)


class QShallowRegressionLSTM(nn.Module):
    def __init__(self, num_sensors, hidden_units, n_qubits=0, n_qlayers=1):
        super().__init__()
        self.num_sensors = num_sensors  # this is the number of features
        self.hidden_units = hidden_units
        self.num_layers = 1

        # self.lstm = nn.LSTM(
        #    input_size=num_sensors,
        #    hidden_size=hidden_units,
        #    batch_first=True,
        #    num_layers=self.num_layers
        # )

        self.lstm = QLSTM(
            input_size=num_sensors,
            hidden_size=hidden_units,
            batch_first=True,
            n_qubits=n_qubits,
            n_qlayers=n_qlayers
        )

        self.linear = nn.Linear(in_features=self.hidden_units, out_features=1)

    def forward(self, x):
        batch_size = x.shape[0]
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()

        _, (hn, _) = self.lstm(x, (h0, c0))
        out = self.linear(hn).flatten()  # First dim of Hn is num_layers, which is set to 1 above.

        return out

Then I'm running additional code in a notebook. Here is the code.

def train_model(data_loader, model, loss_function, optimizer):
    num_batches = len(data_loader)
    total_loss = 0
    model.train()

    for X, y in data_loader:
        output = model(X)
        loss = loss_function(output, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    avg_loss = total_loss / num_batches
    print(f"Train loss: {avg_loss}")
    return avg_loss

def test_model(data_loader, model, loss_function):

    num_batches = len(data_loader)
    total_loss = 0

    model.eval()
    with torch.no_grad():
        for X, y in data_loader:
            output = model(X)
            total_loss += loss_function(output, y).item()

    avg_loss = total_loss / num_batches
    print(f"Test loss: {avg_loss}")
    return avg_loss

def predict(data_loader, model):
    """Just like `test_loop` function but keep track of the outputs instead of the loss
    function.
    """
    output = torch.tensor([])
    model.eval()
    with torch.no_grad():
        for X, _ in data_loader:
            y_star = model(X)
            output = torch.cat((output, y_star), 0)

    return output
from factory import QShallowRegressionLSTM

learning_rate = 0.05
num_hidden_units = 16

Qmodel = QShallowRegressionLSTM(num_sensors=len(features), hidden_units=num_hidden_units, n_qubits=4)
loss_function = nn.MSELoss()
optimizer = torch.optim.Adagrad(Qmodel.parameters(), lr=learning_rate)

quantum_loss_train = []
quantum_loss_test = []
print("Untrained test\n--------")
start = time.time()
test_loss = test_model(test_loader, Qmodel, loss_function)
end = time.time()
print("Execution time", end - start)
quantum_loss_test.append(test_loss)

for ix_epoch in range(20):
    print(f"Epoch {ix_epoch}\n---------")
    start = time.time()
    train_loss = train_model(train_loader, Qmodel, loss_function, optimizer=optimizer)
    test_loss = test_model(test_loader, Qmodel, loss_function)
    end = time.time()
    print("Execution time", end - start)
    quantum_loss_train.append(train_loss)
    quantum_loss_test.append(test_loss)

Here is the full error message:

--------
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-16-966b8482e935> in <cell line: 14>()
     12 print("Untrained test\n--------")
     13 start = time.time()
---> 14 test_loss = test_model(test_loader, Qmodel, loss_function)
     15 end = time.time()
     16 print("Execution time", end - start)

12 frames
<ipython-input-14-36c020aec645> in test_model(data_loader, model, loss_function)
     26     with torch.no_grad():
     27         for X, y in data_loader:
---> 28             output = model(X)
     29             total_loss += loss_function(output, y).item()
     30 

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/content/factory.py in forward(self, x)
    241         c0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
    242 
--> 243         _, (hn, _) = self.lstm(x, (h0, c0))
    244         out = self.linear(hn).flatten()  # First dim of Hn is num_layers, which is set to 1 above.
    245 

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/content/factory.py in forward(self, x, init_states)
    198             y_t = self.clayer_in(v_t)
    199 
--> 200             f_t = torch.sigmoid(self.clayer_out(self.VQC['forget'](y_t)))  # forget block
    201             i_t = torch.sigmoid(self.clayer_out(self.VQC['input'](y_t)))  # input block
    202             g_t = torch.tanh(self.clayer_out(self.VQC['update'](y_t)))  # update block

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.10/dist-packages/pennylane/qnn/torch.py in forward(self, inputs)
    406         else:
    407             # calculate the forward pass as usual
--> 408             results = self._evaluate_qnode(inputs)
    409 
    410         # reshape to the correct number of batch dims

/usr/local/lib/python3.10/dist-packages/pennylane/qnn/torch.py in _evaluate_qnode(self, x)
    427             **{arg: weight.to(x) for arg, weight in self.qnode_weights.items()},
    428         }
--> 429         res = self.qnode(**kwargs)
    430 
    431         if isinstance(res, torch.Tensor):

/usr/local/lib/python3.10/dist-packages/pennylane/qnode.py in __call__(self, *args, **kwargs)
    934 
    935         # construct the tape
--> 936         self.construct(args, kwargs)
    937 
    938         cache = self.execute_kwargs.get("cache", False)

/usr/local/lib/python3.10/dist-packages/pennylane/qnode.py in construct(self, args, kwargs)
    825             self.interface = qml.math.get_interface(*args, *list(kwargs.values()))
    826 
--> 827         self._tape = make_qscript(self.func, shots)(*args, **kwargs)
    828         self._qfunc_output = self.tape._qfunc_output
    829 

/usr/local/lib/python3.10/dist-packages/pennylane/tape/qscript.py in wrapper(*args, **kwargs)
   1480     def wrapper(*args, **kwargs):
   1481         with AnnotatedQueue() as q:
-> 1482             result = fn(*args, **kwargs)
   1483 
   1484         qscript = QuantumScript.from_queue(q, shots)

/content/factory.py in _circuit_forget(inputs, weights)
    129 
    130         def _circuit_forget(inputs, weights):
--> 131             VQC(inputs, weights, self.wires_forget)
    132             return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_forget]
    133 

/content/factory.py in VQC(features, weights, wires_type)
    122             for i in range(self.n_qubits):
    123                 qml.Hadamard(wires=wires_type[i])
--> 124                 qml.RY(ry_params[i], wires=wires_type[i])
    125                 qml.RZ(ry_params[i], wires=wires_type[i])
    126 

IndexError: list index out of range

I’m using all of the most recent packages. Thank you in advance for your help.

Hey @lingling36109!

There are imports and other things missing, so I can’t replicate the behaviour. However, the issue is in here:

        def VQC(features, weights, wires_type):
            # Preprocess input data to encode the initial state.
            # qml.templates.AngleEmbedding(features, wires=wires_type)
            ry_params = [torch.arctan(feature) for feature in features]
            rz_params = [torch.arctan(feature ** 2) for feature in features]
            for i in range(self.n_qubits):
                qml.Hadamard(wires=wires_type[i])
                qml.RY(ry_params[i], wires=wires_type[i])
                qml.RZ(ry_params[i], wires=wires_type[i])

            # Variational block.
            qml.layer(ansatz, self.n_qlayers, weights, wires_type=wires_type)

The list ry_params has a length that is less than n_qubits, so your loop over n_qubits tries to access an element of ry_params that falls outside its range. I would check what features is, since ry_params has the same length as features.
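For example, here is the failure mode in isolation (made-up numbers, purely to illustrate what happens when fewer than n_qubits features reach the circuit):

import torch

features = torch.tensor([0.1, 0.2])  # only 2 entries arrive
ry_params = [torch.arctan(feature) for feature in features]
n_qubits = 4
ry_params[n_qubits - 1]  # IndexError: list index out of range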

Good luck debugging! :smiley:

Hmm, the funny thing is that it was working before, but now it doesn't.
If you want to run it, I'm basically using the same code as this GitHub repo: GitHub - DikshantDulal/SoftServe_QLSTM: We implement a quantum-classical hybrid QLSTM model by incorporating quantum variational layers into the classical LSTM in order to improve the efficiency and trainability of LSTM for better stock price prediction.

The important files are factory.py, Stock Prediction Draft 3.ipynb, and dataset_MRK_prediction.csv.

Thank you so much for your help again!

Hmm, the funny thing is that it was working before, but now it doesn't.

In that case, it might be worth checking out some of the changes to QNode returns that were introduced as default behaviour in v0.30: QNode returns — PennyLane 0.33.0 documentation

Let me know if that helps!

Hello,
I have exactly the same problem, could you let me know if you managed to fix it ?
Thanks!

Hey @Youce! Welcome to the forum :smile:

Have you checked out the new QNode returns page? (QNode returns — PennyLane 0.32.0 documentation)

If that's not helping, could you attach a small code example that replicates the issue you're having? That would help me :grin:

Hello, this was a while ago, but I do believe the issue was a compatibility problem between different packages, specifically PennyLane. I believe I used version 0.31.0 or 0.30.0.

1 Like

Hi @lingling36109 , thank you for confirming this! Does this mean you managed to get everything working when you updated your PennyLane version?

@Youce, let us know if using PennyLane v0.32 fixes your issue.

We also have a new PennyLane survey. Let us know your thoughts about PennyLane so that we can keep bringing you amazing features :sparkles:.

Actually, the opposite. I had an issue when I used v0.32.0. Using an older version fixed the compatibility issue. Sorry for being unclear in my original statement!

Thank you for clarifying @lingling36109 !

In that case the most likely explanation is indeed the change in QNode returns. On this page in the documentation you can go to the PyTorch section at the bottom for a suggestion on how to make this compatible with the latest version. Using torch.hstack could help, as mentioned there. Otherwise you can also consult the page on QNode returns which Isaac shared before.
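For reference, here is a minimal standalone example of the torch.hstack pattern from that page (the device and circuit below are toy placeholders, not taken from the QLSTM code):

import pennylane as qml
import torch

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev, interface="torch")
def circuit(x, weights):
    qml.RY(x[0], wires=0)
    qml.RY(x[1], wires=1)
    qml.RX(weights[0], wires=0)
    qml.RX(weights[1], wires=1)
    return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))]

x = torch.tensor([0.1, 0.2])
weights = torch.tensor([0.3, 0.4], requires_grad=True)

res = circuit(x, weights)    # with PennyLane >= 0.30 this is a sequence of tensors, one per measurement
stacked = torch.hstack(res)  # a single 1-d tensor, matching the pre-0.30 behaviour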

Let us know if you manage to update your code :smiley:

The page links mentioned in the post are missing!

When I click on the link you have provided, it says page not found.

Hello, I've checked the QNode returns page as well as tried with older versions of PennyLane, but the problem still seems to persist. Please let me know if there is anything else I could try. Thanks :slight_smile:

Hey @Siva_Karthikeya, welcome to the forum! :rocket:

Indeed both pages are no longer available. You can still find the page here: pennylane/doc/introduction/returns.rst at v0.35.0 · PennyLaneAI/pennylane · GitHub

We removed the troubleshooting page for QNode returns because it’s been a while since those changes took effect. Let me know if you have any further questions!

Hello @isaacdevlugt, thank you for your prompt response. I have tried correcting a few things but haven't managed to resolve the error yet. The issue is exactly what @lingling36109 pointed out previously. I'm not sure where to apply the QNode return changes so that my code works properly.

Hey @Siva_Karthikeya,

Could you attach a minimal example of your code that replicates the problem you’re having? Let’s start there and see if we can solve the problem :slight_smile:

These are the QLSTM and QShallowRegressionLSTM classes, which are in a separate .py file. I'm running the program from a Jupyter notebook.

class QLSTM(nn.Module):

    def __init__(self, 
                input_size, 
                hidden_size, 
                n_qubits=4,
                n_qlayers=1,
                n_vrotations=3,
                batch_first=True,
                return_sequences=False, 
                return_state=False,
                backend="default.qubit"):
        super(QLSTM, self).__init__()
        self.n_inputs = input_size
        self.hidden_size = hidden_size
        self.concat_size = self.n_inputs + self.hidden_size
        self.n_qubits = n_qubits
        self.n_qlayers = n_qlayers
        self.n_vrotations = n_vrotations
        self.backend = backend  # "default.qubit", "qiskit.basicaer", "qiskit.ibm"

        self.batch_first = batch_first
        self.return_sequences = return_sequences
        self.return_state = return_state
        
        self.wires_forget = [f"wire_forget_{i}" for i in range(self.n_qubits)]
        self.wires_input = [f"wire_input_{i}" for i in range(self.n_qubits)]
        self.wires_update = [f"wire_update_{i}" for i in range(self.n_qubits)]
        self.wires_output = [f"wire_output_{i}" for i in range(self.n_qubits)]

        self.dev_forget = qml.device(self.backend, wires=self.wires_forget)
        self.dev_input = qml.device(self.backend, wires=self.wires_input)
        self.dev_update = qml.device(self.backend, wires=self.wires_update)
        self.dev_output = qml.device(self.backend, wires=self.wires_output)

        #self.dev_forget = qml.device(self.backend, wires=self.n_qubits)
        #self.dev_input = qml.device(self.backend, wires=self.n_qubits)
        #self.dev_update = qml.device(self.backend, wires=self.n_qubits)
        #self.dev_output = qml.device(self.backend, wires=self.n_qubits)
        
        def ansatz(params, wires_type):
            # Entangling layer.
            for i in range(1,3): 
                for j in range(self.n_qubits):
                    if j + i < self.n_qubits:
                        qml.CNOT(wires=[wires_type[j], wires_type[j + i]])
                    else:
                        qml.CNOT(wires=[wires_type[j], wires_type[j + i - self.n_qubits]])

            # Variational layer.
            for i in range(self.n_qubits):
                qml.RX(params[0][i], wires=wires_type[i])
                qml.RY(params[1][i], wires=wires_type[i])
                qml.RZ(params[2][i], wires=wires_type[i])
                
        def VQC(features, weights, wires_type):
            # Preprocess input data to encode the initial state.
            #qml.templates.AngleEmbedding(features, wires=wires_type)
            ry_params = [torch.arctan(feature) for feature in features]
            rz_params = [torch.arctan(feature**2) for feature in features]
            
            for i in range(self.n_qubits):
                qml.Hadamard(wires=wires_type[i])
                qml.RY(ry_params[i], wires=wires_type[i])
                qml.RZ(ry_params[i], wires=wires_type[i])
                
        
            #Variational block.
            qml.layer(ansatz, self.n_qlayers, weights, wires_type = wires_type)

        def _circuit_forget(inputs, weights):
            result = VQC(inputs, weights, self.wires_forget)
            return qml.expval(qml.PauliZ(wires=self.wires_forget[0]))

        self.qlayer_forget = qml.QNode(_circuit_forget, self.dev_forget, interface="torch")


        def _circuit_input(inputs, weights):
            VQC(inputs, weights, self.wires_input)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_input]
        self.qlayer_input = qml.QNode(_circuit_input, self.dev_input, interface="torch")

        def _circuit_update(inputs, weights):
            VQC(inputs, weights, self.wires_update)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_update]
        self.qlayer_update = qml.QNode(_circuit_update, self.dev_update, interface="torch")

        def _circuit_output(inputs, weights):
            VQC(inputs, weights, self.wires_output)
            return [qml.expval(qml.PauliZ(wires=i)) for i in self.wires_output]
        self.qlayer_output = qml.QNode(_circuit_output, self.dev_output, interface="torch")

        weight_shapes = {"weights": (self.n_qlayers, self.n_vrotations, self.n_qubits)}
        print(f"weight_shapes = (n_qlayers, n_vrotations, n_qubits) = ({self.n_qlayers}, {self.n_vrotations}, {self.n_qubits})")

        self.clayer_in = torch.nn.Linear(self.concat_size, self.n_qubits)
        self.VQC = {
            'forget': qml.qnn.TorchLayer(self.qlayer_forget, weight_shapes),
            'input': qml.qnn.TorchLayer(self.qlayer_input, weight_shapes),
            'update': qml.qnn.TorchLayer(self.qlayer_update, weight_shapes),
            'output': qml.qnn.TorchLayer(self.qlayer_output, weight_shapes)
        }
        self.clayer_out = torch.nn.Linear(self.n_qubits, self.hidden_size)
        #self.clayer_out = [torch.nn.Linear(n_qubits, self.hidden_size) for _ in range(4)]

    def forward(self, x, init_states=None):
        '''
        x.shape is (batch_size, seq_length, feature_size)
        recurrent_activation -> sigmoid
        activation -> tanh
        '''
        if self.batch_first is True:
            batch_size, seq_length, features_size = x.size()
        else:
            seq_length, batch_size, features_size = x.size()

        hidden_seq = []
        if init_states is None:
            h_t = torch.zeros(batch_size, self.hidden_size)  # hidden state (output)
            c_t = torch.zeros(batch_size, self.hidden_size)  # cell state
        else:
            # for now we ignore the fact that in PyTorch you can stack multiple RNNs
            # so we take only the first elements of the init_states tuple init_states[0][0], init_states[1][0]
            h_t, c_t = init_states
            h_t = h_t[0]
            c_t = c_t[0]

        for t in range(seq_length):
            # get features from the t-th element in seq, for all entries in the batch
            x_t = x[:, t, :]
            
            # Concatenate input and hidden state
            v_t = torch.cat((h_t, x_t), dim=1)

            # match qubit dimension
            y_t = self.clayer_in(v_t)

            f_t = torch.sigmoid(self.clayer_out(self.VQC['forget'](y_t)))  # forget block
            i_t = torch.sigmoid(self.clayer_out(self.VQC['input'](y_t)))  # input block
            g_t = torch.tanh(self.clayer_out(self.VQC['update'](y_t)))  # update block
            o_t = torch.sigmoid(self.clayer_out(self.VQC['output'](y_t))) # output block

            c_t = (f_t * c_t) + (i_t * g_t)
            h_t = o_t * torch.tanh(c_t)

            hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        return hidden_seq, (h_t, c_t)
    
class QShallowRegressionLSTM(nn.Module):

    def __init__(self, num_sensors, hidden_units, n_qubits=0, n_qlayers=1):
        super().__init__()
        self.num_sensors = num_sensors  # this is the number of features
        self.hidden_units = hidden_units
        self.num_layers = 1

        #self.lstm = nn.LSTM(
        #    input_size=num_sensors,
        #    hidden_size=hidden_units,
        #    batch_first=True,
        #    num_layers=self.num_layers
        #)
        
        self.lstm = QLSTM(
            input_size=num_sensors,
            hidden_size=hidden_units,
            batch_first=True,
            n_qubits = n_qubits,
            n_qlayers= n_qlayers
        )

        self.linear = nn.Linear(in_features=self.hidden_units, out_features=1)

    def forward(self, x):
        batch_size = x.shape[0]
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
        
        _, (hn, _) = self.lstm(x, (h0, c0))
        out = self.linear(hn).flatten()  # First dim of Hn is num_layers, which is set to 1 above.

        return out

The error I'm getting is this:

{
"name": "IndexError",
"message": "list index out of range",
"stack": "---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
Cell In[20], line 5
3 print("Untrained test\n--------")
4 start = time.time()
----> 5 test_loss = test_model(test_loader, Qmodel, loss_function)
6 end = time.time()
7 print("Execution time", end - start)

Cell In[9], line 28, in test_model(data_loader, model, loss_function)
26 with torch.no_grad():
27 for X, y in data_loader:
—> 28 output = model(X)
29 total_loss += loss_function(output, y).item()
31 avg_loss = total_loss / num_batches

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
→ 1532 return self._call_impl(*args, **kwargs)

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don’t have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None

File c:\Users\z004yk0e\QLSTM\SoftServe_QLSTM-main\Factory.py:232, in QShallowRegressionLSTM.forward(self, x)
229 h0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
230 c0 = torch.zeros(self.num_layers, batch_size, self.hidden_units).requires_grad_()
→ 232 _, (hn, _) = self.lstm(x, (h0, c0))
233 out = self.linear(hn).flatten() # First dim of Hn is num_layers, which is set to 1 above.
235 return out

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
→ 1532 return self._call_impl(*args, **kwargs)

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don’t have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None

File c:\Users\z004yk0e\QLSTM\SoftServe_QLSTM-main\Factory.py:190, in QLSTM.forward(self, x, init_states)
187 # match qubit dimension
188 y_t = self.clayer_in(v_t)
→ 190 f_t = torch.sigmoid(self.clayer_out(self.VQC['forget'](y_t)))  # forget block
191 i_t = torch.sigmoid(self.clayer_out(self.VQC['input'](y_t)))  # input block
192 g_t = torch.tanh(self.clayer_out(self.VQC['update'](y_t)))  # update block

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
→ 1532 return self._call_impl(*args, **kwargs)

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\torch\nn\modules\module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don’t have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\pennylane\qnn\torch.py:402, in TorchLayer.forward(self, inputs)
399 inputs = torch.reshape(inputs, (-1, inputs.shape[-1]))
401 # calculate the forward pass as usual
→ 402 results = self._evaluate_qnode(inputs)
404 if isinstance(results, tuple):
405 if has_batch_dim:

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\pennylane\qnn\torch.py:428, in TorchLayer._evaluate_qnode(self, x)
416 """Evaluates the QNode for a single input datapoint.
417
418 Args:
(…)
422 tensor: output datapoint
423 """
424 kwargs = {
425 **{self.input_arg: x},
426 **{arg: weight.to(x) for arg, weight in self.qnode_weights.items()},
427 }
→ 428 res = self.qnode(**kwargs)
430 if isinstance(res, torch.Tensor):
431 return res.type(x.dtype)

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\pennylane\workflow\qnode.py:1092, in QNode.__call__(self, *args, **kwargs)
1089 override_shots = kwargs["shots"]
1091 # construct the tape
→ 1092 self.construct(args, kwargs)
1094 original_grad_fn = [self.gradient_fn, self.gradient_kwargs, self.device]
1095 self._update_gradient_fn(shots=override_shots, tape=self._tape)

File c:\Users\z004yk0e\.pyenv\pyenv-win\versions\3.12.2\Lib\site-packages\pennylane\workflow\qnode.py:929, in QNode.construct(self, args, kwargs)
926 self.interface = qml.math.get_interface(*args, *list(kwargs.values()))
928 with qml.queuing.AnnotatedQueue() as q:
→ 929 self._qfunc_output = self.func(*args, **kwargs)
931 self._tape = QuantumScript.from_queue(q, shots)
933 params = self.tape.get_parameters(trainable_only=False)

File c:\Users\z004yk0e\QLSTM\SoftServe_QLSTM-main\Factory.py:124, in QLSTM.__init__.<locals>._circuit_forget(inputs, weights)
123 def _circuit_forget(inputs, weights):
→ 124 result = VQC(inputs, weights, self.wires_forget)
125 return qml.expval(qml.PauliZ(wires=self.wires_forget[0]))

File c:\Users\z004yk0e\QLSTM\SoftServe_QLSTM-main\Factory.py:116, in QLSTM.__init__.<locals>.VQC(features, weights, wires_type)
114 for i in range(self.n_qubits):
115 qml.Hadamard(wires=wires_type[i])
→ 116 qml.RY(ry_params[i], wires=wires_type[i])
117 qml.RZ(ry_params[i], wires=wires_type[i])
120 #Variational block.

IndexError: list index out of range"
}

Thanks! I can’t run your code to try and reproduce the error because there are things missing, but it seems like the issue doesn’t have to do with QNode returns? :thinking:

File c:\Users\z004yk0e\QLSTM\SoftServe_QLSTM-main\Factory.py:116, in QLSTM.__init__.<locals>.VQC(features, weights, wires_type)
114 for i in range(self.n_qubits):
115 qml.Hadamard(wires=wires_type[i])
→ 116 qml.RY(ry_params[i], wires=wires_type[i])
117 qml.RZ(ry_params[i], wires=wires_type[i])
120 #Variational block.

Line 116 in Factory.py (qml.RY(ry_params[i], wires=wires_type[i])) is trying to access an element of an object that doesn’t exist. Akin to this:

>>> a = [0, 1, 2, 3, 4]
>>> a[5]
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
IndexError: list index out of range

This could either be ry_params[i] or wires_type[i], where the index i is too large :slight_smile:. Let me know if that helps!
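One way to narrow it down is to print the relevant sizes just before the loop in VQC (a temporary diagnostic sketch to drop into Factory.py, not part of your original code):

# temporary diagnostic, placed right after ry_params/rz_params are built inside VQC
print(
    f"n_qubits={self.n_qubits}, "
    f"len(ry_params)={len(ry_params)}, "
    f"len(wires_type)={len(wires_type)}"
)

Whichever of those numbers is smaller than n_qubits is the one causing the IndexError.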

This code is from the following github repo: GitHub - DikshantDulal/SoftServe_QLSTM: We implement a quantum-classical hybrid QLSTM model by incorporating quantum variational layers into the classical LSTM in order to improve the efficiency and trainability of LSTM for better stock price prediction.

Previously you suggested to @lingling36109 that it has to do with the change in QNode returns that was introduced as default behaviour in v0.30. Isn't that an issue? I thought that might be the case, since this code is a bit old. Please let me know.

I have checked that the length of the features array that goes into the VQC function is 4, which is the same as the number of qubits used. The same is true for the number of wires. I'm not able to figure out why it is going out of range.
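For reference, this is a minimal sketch of the kind of check I mean (dimensions taken from my code above, where clayer_in projects down to n_qubits = 4):

import torch

n_qubits = 4
y_t = torch.randn(1, n_qubits)  # same shape as the tensor handed to the TorchLayer in forward
features = y_t[0]               # a single row has 4 entries, one per qubit
print(len(features))            # prints 4, matching n_qubits and the number of wires

So as far as I can tell, the lengths line up, yet the error still occurs at runtime.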