Backpropagation error in hybrid model with PyTorch

Hi! I'm running into an error in my hybrid model code.

From the loss value between the target and the prediction, I verified that the forward pass of my model works fine. However, an error occurs during the backward pass.

The code for my hybrid model is shown below:

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv1d(1, 8, 30, 2)
        self.conv2 = nn.Conv1d(8, 16, 20, 2)
        self.conv3 = nn.Conv1d(16, 32, 10, 2)
        self.dropout = nn.Dropout1d()
        self.fc1 = nn.Linear(160, 64)
        self.fc2 = nn.Linear(64, 16)

    def forward(self, x):
        x = nn.functional.tanh(self.conv1(x))
        x = nn.functional.tanh(self.conv2(x))
        x = nn.functional.tanh(self.conv3(x))
        x = self.dropout(x)
        x = x.view(1, -1)
        x = nn.functional.tanh(self.fc1(x))
        x = self.fc2(x)
        x = qlayer(x)
        #print("output of qlayer:", x)

        #return torch.cat((x, 1 - x), -1)
        return x

RuntimeError                              Traceback (most recent call last)
Cell In[17], line 16
     13 train_loader = DataLoader(tr_dataset, batch_size=1, shuffle=True, drop_last=True)
     14 test_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, drop_last=False)
---> 16 loss_list_train = train(train_loader=train_loader, epochs=epochs)
     18 train_loss_df = pd.DataFrame(loss_list_train)
     19 train_loss_df.to_csv('F:/Student/CJG/220916_QC/3_IBM_QLAB/2_result/0_hybrid/1_loss/Train_loss(ROI_'+str(ROI_info)+').csv', index=False, header=None)

Cell In[16], line 16, in train(epochs, train_loader)
     14 print("loss:", loss)
     15 # Backward pass
---> 16 loss.backward()
     17 # Optimize the weights
     18 optimizer.step()

File ~\anaconda3\envs\pennylane\lib\site-packages\torch\_tensor.py:488, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    478 if has_torch_function_unary(self):
    479     return handle_torch_function(
    480         Tensor.backward,
    481         (self,),
   (...)
    486         inputs=inputs,
    487     )
--> 488 torch.autograd.backward(
    489     self, gradient, retain_graph, create_graph, inputs=inputs
    490 )

File ~\anaconda3\envs\pennylane\lib\site-packages\torch\autograd\__init__.py:197, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    192     retain_graph = create_graph
    194 # The reason we repeat same the comment below is that
    195 # some Python versions print out the first line of a multi-line function
    196 # calls in the traceback and some print out the last line
--> 197 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    198     tensors, grad_tensors_, retain_graph, create_graph, inputs,
    199     allow_unreachable=True, accumulate_grad=True)

RuntimeError: function ExecuteTapesBackward returned a gradient different than None at position 26, but the corresponding forward input was not a Variable

Has anyone encountered a similar error?

Hi @junggu.choi, thank you for your question and welcome to the Forum!

I think the error occurs because of the way you have included the qlayer.

Have you tried running this demo on hybrid models with PyTorch? It may help you learn how to modify your code so that it works.

I have modified the example there so that you can see more easily the connection between "__init__" in the HybridModel class and "forward". Notice how you need to define the layers in "__init__" and then use them in "forward". If you look closely at the "Net" class you shared, you never defined the qlayer there, so its weights were never registered as part of your model. Here's the working example, followed by a sketch of how the same fix would apply to your Net class:

import pennylane as qml
import torch
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_moons

# Set random seeds
torch.manual_seed(42)
np.random.seed(42)

X, y = make_moons(n_samples=200, noise=0.1)
y_ = torch.unsqueeze(torch.tensor(y), 1)  # used for one-hot encoded labels
y_hot = torch.scatter(torch.zeros((200, 2)), 1, y_, 1)

c = ["#1f77b4" if y_ == 0 else "#ff7f0e" for y_ in y]  # colours for each class
plt.axis("off")
plt.scatter(X[:, 0], X[:, 1], c=c)
plt.show()

n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def qnode(inputs, weights):
    qml.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.BasicEntanglerLayers(weights, wires=range(n_qubits))
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

n_layers = 6
weight_shapes = {"weights": (n_layers, n_qubits)}

qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)  # a TorchLayer can be created standalone, but the model below defines its own copy in __init__

class HybridModel(torch.nn.Module):
    def __init__(self):  # note the double underscores: "def init(self):" will not work
        super().__init__()
        self.fc1 = torch.nn.Linear(2, 2)
        self.qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)  # the quantum layer is defined here, as a submodule
        self.fc2 = torch.nn.Linear(2, 2)
        self.softmax = torch.nn.Softmax(dim=1)
        
    def forward(self, x):
        x = self.fc1(x)
        x = self.qlayer(x)
        x = self.fc2(x)
        return self.softmax(x)

model = HybridModel()

loss = torch.nn.L1Loss()

X = torch.tensor(X, requires_grad=True).float()
y_hot = y_hot.float()

batch_size = 5
batches = 200 // batch_size

data_loader = torch.utils.data.DataLoader(
    list(zip(X, y_hot)), batch_size=5, shuffle=True, drop_last=True
)

opt = torch.optim.SGD(model.parameters(), lr=0.2)
epochs = 6

for epoch in range(epochs):

    running_loss = 0

    for xs, ys in data_loader:
        opt.zero_grad()

        loss_evaluated = loss(model(xs), ys)
        loss_evaluated.backward()

        opt.step()

        running_loss += loss_evaluated.item()  # .item() extracts a float so we don't keep the autograd graph alive

    avg_loss = running_loss / batches
    print("Average loss over epoch {}: {:.4f}".format(epoch + 1, avg_loss))

y_pred = model(X)
predictions = torch.argmax(y_pred, axis=1).detach().numpy()

correct = [1 if p == p_true else 0 for p, p_true in zip(predictions, y)]
accuracy = sum(correct) / len(correct)
print(f"Accuracy: {accuracy * 100}%")

Please let me know if this makes things clearer and helps you move forward with your implementation.

I hope this helps!

@CatalinaAlbornoz
Thank you so much! I'm sorry for replying to your response so late :frowning:
It was so helpful for me!

I’m glad this was helpful for you!