Issue with set_weights in the qml.qnn KerasLayer quantum layer

I am working with a hybrid model [clayer1, qlayer, clayer2] on MNIST digits. There are two issues:

  1. When I print the summary of the model, it shows "(unused)" next to qlayer in the summary table.
  2. get_weights returns a weight list of length 5, but set_weights then complains that the model expects 4 weights instead of 5.

How can I get the same shape and length of model weights before and after training?
The code of the model is:

n_qubits = 2
num_layers = 1

# qml.enable_tape()

dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev, interface="tf", diff_method="backprop")
def circuit(inputs, weights):
    qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

class Simple:
    @staticmethod
    def build(shape):
        n_qubits = 2
        num_layers = 1

        weight_shapes = {"weights": (num_layers, n_qubits)}

        tf.keras.backend.set_floatx('float64')

        clayer1 = tf.keras.layers.Dense(2, input_shape=(shape,))
        qlayer = qml.qnn.KerasLayer(circuit, weight_shapes, output_dim=n_qubits)
        clayer2 = tf.keras.layers.Dense(2, activation='softmax')

        model = tf.keras.models.Sequential([clayer1, qlayer, clayer2])

        return model

smlp = Simple()
model = smlp.build(X_train.shape[1])

Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_2 (Dense)              (None, 2)                 6
_________________________________________________________________
keras_layer_1 (KerasLayer)   (None, 2)                 0 (unused)
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 6
=================================================================
Total params: 12
Trainable params: 12
Non-trainable params: 0

print("Weights and biases of the layers BEFORE training the model:\n")
for layer in model.layers:
    print(layer.name)
    print("Weights")
    print("Shape: ", '\n', layer.get_weights())

weights = model.get_weights()
print("len of weights BEFORE", len(weights))

# optimizer, loss, and metrics are defined elsewhere in the original code
model.compile(optimizer, loss, metrics)

fitting = model.fit(X_train, y_train, epochs=1, batch_size=5)

weights = model.get_weights()
print("len of weights AFTER", len(weights))

print("Weights and biases of the layers AFTER training the model:\n")
for layer in model.layers:
    print(layer.name)
    print("Weights")
    print("Shape: ", '\n', layer.get_weights())

Hi @Amandeep,

This can be fixed by doing a forward pass through the model before printing the summary. Keras only creates a layer's weights when the layer is first called, so until the model has seen some data the quantum layer shows up as unused and its weights are not yet registered with the model. For example:

from sklearn.datasets import make_moons
import pennylane as qml
import tensorflow as tf

X_train, y = make_moons(n_samples=200, noise=0.1)

n_qubits = 2
num_layers = 1

dev = qml.device('default.qubit', wires=n_qubits)

@qml.qnode(dev, interface='tf', diff_method='backprop')
def circuit(inputs, weights):
    qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

shape = X_train.shape[1]
weight_shapes = {"weights": (num_layers, n_qubits)}

tf.keras.backend.set_floatx('float64')

clayer1 = tf.keras.layers.Dense(2, input_shape=(shape,))
qlayer = qml.qnn.KerasLayer(circuit, weight_shapes, output_dim=n_qubits)
clayer2 = tf.keras.layers.Dense(2, activation='softmax')

model = tf.keras.models.Sequential([clayer1, qlayer, clayer2])

# Do a forward pass before printing the summary so that all layers get built
model(X_train[:2])

model.summary()
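
The same fix resolves the set_weights mismatch: once the model has been called on data, all five weight tensors (kernel and bias for each Dense layer, plus the quantum weights) are registered with the model. A minimal check, using the model built above:

weights = model.get_weights()
print("len of weights:", len(weights))  # 5, and it stays 5 after training
model.set_weights(weights)              # no length mismatch any more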

I hope this helps!

@CatalinaAlbornoz Yes, it worked. Thank you so much.
