Hi @CatalinaAlbornoz,
Please find a sample full code below causing the error as I mentioned above.
Imports
import pennylane as qml
from pennylane import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
load dataset
x_train, x_test = datasets.load_digits().data, datasets.load_digits().target
Split train and test data
x_train, x_test, y_train, y_test = train_test_split(
x_train, x_test, test_size=0.25, random_state=42)
quantum circuit
n_qubits = 6
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, weights):
qml.templates.AmplitudeEmbedding(inputs, wires=range(n_qubits), pad_with=2, normalize = True)
qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits), rotation = qml.RY)
return [qml.expval(qml.PauliZ(wires=[0]))]
n_layers = 1
weight_shapes = {"weights": (n_layers, n_qubits)}
convert quantum circuit to Keraslayer
qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
creating and compiling neural network model
clayer = tf.keras.layers.Dense(10, activation="softmax")
model = tf.keras.models.Sequential([qlayer, clayer])
opt = tf.keras.optimizers.SGD(learning_rate=0.2)
model.compile(opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
Start training
history = model.fit(x_train, y_train, epochs=5, batch_size=16, validation_data=(x_test, y_test))
Error with full traceback
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-61-92f97b8c4386> in <module>
----> 1 history = model.fit(x_train, y_train, epochs=5, batch_size=16, validation_data=(x_test, y_test))
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1181 _r=1):
1182 callbacks.on_train_batch_begin(step)
-> 1183 tmp_logs = self.train_function(iterator)
1184 if data_handler.should_sync:
1185 context.async_wait()
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in train_function(iterator)
853 def train_function(iterator):
854 """Runs a training execution with one step."""
--> 855 return step_function(self, iterator)
856
857 else:
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in step_function(model, iterator)
843
844 data = next(iterator)
--> 845 outputs = model.distribute_strategy.run(run_step, args=(data,))
846 outputs = reduce_per_replica(
847 outputs, self.distribute_strategy, reduction='first')
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in run(***failed resolving arguments***)
1283 fn = autograph.tf_convert(
1284 fn, autograph_ctx.control_status_ctx(), convert_by_default=False)
-> 1285 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
1286
1287 def reduce(self, reduce_op, value, axis):
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
2831 kwargs = {}
2832 with self._container_strategy().scope():
-> 2833 return self._call_for_each_replica(fn, args, kwargs)
2834
2835 def _call_for_each_replica(self, fn, args, kwargs):
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
3606 def _call_for_each_replica(self, fn, args, kwargs):
3607 with ReplicaContext(self._container_strategy(), replica_id_in_sync_group=0):
-> 3608 return fn(*args, **kwargs)
3609
3610 def _reduce_to(self, reduce_op, value, destinations, options):
~\Anaconda3\lib\site-packages\tensorflow\python\autograph\impl\api.py in wrapper(*args, **kwargs)
595 def wrapper(*args, **kwargs):
596 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
--> 597 return func(*args, **kwargs)
598
599 if inspect.isfunction(func) or inspect.ismethod(func):
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in run_step(data)
836
837 def run_step(data):
--> 838 outputs = model.train_step(data)
839 # Ensure counter is updated only if `train_step` succeeds.
840 with ops.control_dependencies(_minimum_control_deps(outputs)):
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in train_step(self, data)
793 # Run forward pass.
794 with backprop.GradientTape() as tape:
--> 795 y_pred = self(x, training=True)
796 loss = self.compiled_loss(
797 y, y_pred, sample_weight, regularization_losses=self.losses)
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1028 with autocast_variable.enable_auto_cast_variables(
1029 self._compute_dtype_object):
-> 1030 outputs = call_fn(inputs, *args, **kwargs)
1031
1032 if self._activity_regularizer:
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\sequential.py in call(self, inputs, training, mask)
378 if not self.built:
379 self._init_graph_network(self.inputs, self.outputs)
--> 380 return super(Sequential, self).call(inputs, training=training, mask=mask)
381
382 outputs = inputs # handle the corner case where self.layers is empty
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py in call(self, inputs, training, mask)
418 a list of tensors if there are more than one outputs.
419 """
--> 420 return self._run_internal_graph(
421 inputs, training=training, mask=mask)
422
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py in _run_internal_graph(self, inputs, training, mask)
554
555 args, kwargs = node.map_arguments(tensor_dict)
--> 556 outputs = node.layer(*args, **kwargs)
557
558 # Update tensor_dict.
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1011 training=training_mode):
1012
-> 1013 input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
1014 if eager:
1015 call_fn = self.call
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
249 value = value.value
250 if value is not None and shape_as_list[int(axis)] not in {value, None}:
--> 251 raise ValueError(
252 'Input ' + str(input_index) + ' of layer ' + layer_name + ' is'
253 ' incompatible with the layer: expected axis ' + str(axis) +
ValueError: Input 0 of layer dense_13 is incompatible with the layer: expected axis -1 of input shape to have value 6 but received input with shape (16, 1)
Hope it makes the issue clear. Thanks for the help.
Regards,
M. Kashif