Hello! If applicable, put your complete code example down below. Make sure that your code:
- is 100% self-contained — someone can copy-paste exactly what is here and run it to reproduce the behaviour you are observing
- includes comments
!pip install pennylane==0.23 --quiet
!pip install keras tensorflow keras-tuner --quiet

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, Model

# Quantum components
import pennylane as qml
from pennylane import numpy as pnp  # PennyLane's wrapped NumPy (kept separate so it does not shadow numpy)
from pennylane.qnn.keras import KerasLayer
from pennylane.templates import AmplitudeEmbedding, AngleEmbedding
def initialize_quantum_circuit(n_qubits, n_layers):
    dev = qml.device("default.qubit", wires=n_qubits)

    @qml.qnode(dev, interface='tf')
    def quantum_circuit(inputs, weights):
        # Step 1: Initialize with Hadamard to all qubits for superposition
        for i in range(n_qubits):
            qml.Hadamard(wires=i)
        # Step 2: Encode classical input into quantum rotation
        for i in range(n_qubits):
            qml.RY(inputs[i], wires=i)
        # Step 3: Add parameterized rotation layers with entanglement
        for layer in range(n_layers):
            for i in range(n_qubits):
                qml.RY(weights[layer, i, 0], wires=i)
                qml.RZ(weights[layer, i, 1], wires=i)
            for i in range(n_qubits - 1):
                qml.CNOT(wires=[i, i+1])
        # Step 4: Measurement (return expectation values)
        return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]

    weight_shapes = {"weights": (n_layers, n_qubits, 2)}
    # return qml.qnn.KerasLayer(quantum_circuit, weight_shapes, output_dim=n_qubits)
    return KerasLayer(quantum_circuit, weight_shapes, output_dim=n_qubits)
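# Optional sanity check (assumes a TensorFlow/Keras combination that qml.qnn.KerasLayer
# supports): the returned layer maps a (batch, n_qubits) tensor to a (batch, n_qubits)
# tensor of Pauli-Z expectation values.
# qlayer = initialize_quantum_circuit(n_qubits=4, n_layers=2)
# print(qlayer(tf.random.uniform((5, 4))).shape)  # expected: (5, 4)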
class QuantumEncodingLayer(tf.keras.layers.Layer):
    def __init__(self, n_qubits=4, n_layers=3, **kwargs):
        super().__init__(**kwargs)
        self.n_qubits = n_qubits
        self.n_layers = n_layers
        # Wrap the QNode as a Keras layer (this is where KerasLayer gets constructed)
        self.quantum_circuit = initialize_quantum_circuit(n_qubits, n_layers)
        # Alternative wiring, commented out; note that inputs is not defined inside __init__:
        # self.quantum_layer = tf.keras.layers.Lambda(lambda x: self.quantum_circuit(x))
        # quantum_features = self.quantum_layer(inputs[:, :self.n_qubits])
        # Classical post-processing layers
        self.post_process = tf.keras.Sequential([
            layers.Dense(32, activation='relu'),
            layers.Dense(n_qubits)
        ])

    def call(self, inputs):
        # Ensure inputs are normalized and of correct shape
        inputs = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=-1))(inputs)
        quantum_features = tf.vectorized_map(
            lambda x: tf.cast(self.quantum_circuit(x[:self.n_qubits]), tf.float32),
            inputs
        )
        return self.post_process(quantum_features)
# Enhanced CNN architecture used as the classical feature extractor
def build_feature_extractor(input_shape):
    inputs = layers.Input(shape=input_shape)
    x = layers.Conv2D(64, 3, activation='relu', padding='same')(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Flatten()(x)
    # Intermediate dense layers with feature extraction points
    dense1 = layers.Dense(256, activation='relu', name='dense1')(x)
    dense1 = layers.BatchNormalization()(dense1)
    dense1 = layers.Dropout(0.4)(dense1)
    dense2 = layers.Dense(128, activation='relu', name='dense2')(dense1)
    dense2 = layers.BatchNormalization()(dense2)
    dense2 = layers.Dropout(0.3)(dense2)
    dense3 = layers.Dense(64, activation='relu', name='dense3')(dense2)
    # Output layer for classification
    output = layers.Dense(10, activation='softmax', name='output')(dense3)
    return Model(inputs=inputs, outputs=[dense1, dense2, dense3, output], name='feature_extractor')
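# Quick shape check (assuming the model builds): build_feature_extractor((28, 28, 1)) has four
# outputs of widths 256 (dense1), 128 (dense2), 64 (dense3) and 10 (output).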
# Enhanced Hybrid Model
class EnhancedHybridModel:
    def __init__(self, input_shape=(28, 28, 1), n_qubits=4, n_layers=3):
        self.input_shape = input_shape
        self.n_qubits = n_qubits
        self.n_layers = n_layers
        self.feature_extractor = build_feature_extractor(input_shape)
        self.quantum_layer = QuantumEncodingLayer(n_qubits, n_layers)
        self.models = self._build_models()

    def _build_models(self):
        input_layer = layers.Input(shape=self.input_shape)
        # Extract features from different layers
        dense1, dense2, dense3, output_logits = self.feature_extractor(input_layer)
        # Quantum encoding on different feature levels
        quantum_features2 = self.quantum_layer(dense2[:, :self.n_qubits])
        quantum_features3 = self.quantum_layer(dense3[:, :self.n_qubits])
        quantum_output = self.quantum_layer(output_logits[:, :self.n_qubits])
        # Build hybrid model with all features
        combined = layers.Concatenate()([
            dense1, dense2, dense3,
            quantum_features2, quantum_features3, quantum_output
        ])
        # Enhanced classical post-processing
        x = layers.Dense(512, activation='relu', kernel_initializer='he_normal')(combined)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.3)(x)
        output = layers.Dense(10, activation='softmax')(x)
        model = Model(inputs=input_layer, outputs=output)
        # Compile model
        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
            loss='categorical_crossentropy',
            metrics=['accuracy']
        )
        return model

# Instantiate the model
hybrid = EnhancedHybridModel(input_shape=(28, 28, 1), n_qubits=4, n_layers=3)
If you want help with diagnosing an error, please put the full error message below:
# ---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipython-input-2719449584.py in <cell line: 0>()
45
46 # Instantiate the model
---> 47 hybrid = EnhancedHybridModel(input_shape=(28, 28, 1), n_qubits=4, n_layers=3)
48
49 # Get the compiled Keras model
4 frames
/usr/local/lib/python3.11/dist-packages/keras/src/layers/layer.py in __init__(self, activity_regularizer, trainable, dtype, autocast, name, **kwargs)
289 self._input_shape_arg = input_shape_arg
290 if kwargs:
--> 291 raise ValueError(
292 "Unrecognized keyword arguments "
293 f"passed to {self.__class__.__name__}: {kwargs}"
ValueError: Unrecognized keyword arguments passed to KerasLayer: {'dynamic': True}
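From the traceback, the rejected keyword is dynamic=True and the check lives in keras/src/layers/layer.py, which suggests that Keras 3 is being loaded, while qml.qnn.KerasLayer was written against the legacy Keras 2 (tf.keras) API. A minimal workaround sketch, assuming the keras/tensorflow install above pulled in Keras 3 (the tf-keras package together with the TF_USE_LEGACY_KERAS flag switches tf.keras back to the Keras 2 API; untested here, so treat it as a guess rather than a confirmed fix):

!pip install tf-keras --quiet
import os
os.environ["TF_USE_LEGACY_KERAS"] = "1"  # must be set before tensorflow is imported
import tensorflow as tf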
And, finally, make sure to include the versions of your packages. Specifically, show us the output of qml.about().
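A quick way to collect those versions, including the TensorFlow and Keras builds that are actually loaded:

import pennylane as qml
import tensorflow as tf
import keras

qml.about()                            # PennyLane version, installed devices, Python and platform info
print("TensorFlow:", tf.__version__)
print("Keras:", keras.__version__)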