Hi all,
I am working on a problem where my input feature size is 64, and I want to encode my features in qubit amplitudes. Given the input size, I need at least 6 qubits (2^6 = 64) to encode the data, and that works fine.
However, when I increase the number of qubits while the input size stays the same (64), training fails. I am using the pad_with argument of AmplitudeEmbedding to fill the extra amplitudes, and this was working fine around a week ago. Can someone please have a look? My code snippet and error traceback are below.
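For what it's worth, the padded embedding seems fine when I call a bare QNode directly with a NumPy array (a minimal sketch of what I tried, with n_qubits = 8 so the 64 features are padded up to 2^8 = 256 amplitudes):

import pennylane as qml
import numpy as np

n_qubits = 8  # 2**8 = 256 amplitudes, so the 64 features need padding
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def circuit(features):
    # pad_with=0. fills the remaining 256 - 64 amplitudes with zeros
    qml.templates.AmplitudeEmbedding(features, wires=range(n_qubits), pad_with=0., normalize=True)
    return qml.expval(qml.PauliZ(0))

print(circuit(np.random.rand(64)))  # runs without error for me

So the problem only seems to show up once the QNode is wrapped in a KerasLayer and fed TensorFlow tensors.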
Sample Code:
import pennylane as qml
from pennylane import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
# dataset import and split
X, y = datasets.load_digits(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
# QNode
n_qubits = 8
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def qnode(inputs, weights):
    qml.templates.AmplitudeEmbedding(inputs, wires=range(n_qubits), pad_with=0., normalize=True)
    qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits), rotation=qml.RY)
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

n_layers = 4
weight_shapes = {"weights": (n_layers, n_qubits)}

# quantum Keras layer (defined after the QNode it wraps)
qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
# model
clayer = tf.keras.layers.Dense(10, activation="softmax")
model = tf.keras.models.Sequential([qlayer, clayer])
opt = tf.keras.optimizers.SGD(learning_rate=0.2)
model.compile(opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# training
history = model.fit(x_train, y_train, epochs=50, batch_size=16, validation_data=(x_test, y_test))
Error Traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-120-394f1966082d> in <module>
----> 1 history = model.fit(x_train, y_train, epochs=50, batch_size=16, validation_data=(x_test, y_test))
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1181 _r=1):
1182 callbacks.on_train_batch_begin(step)
-> 1183 tmp_logs = self.train_function(iterator)
1184 if data_handler.should_sync:
1185 context.async_wait()
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in train_function(iterator)
853 def train_function(iterator):
854 """Runs a training execution with one step."""
--> 855 return step_function(self, iterator)
856
857 else:
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in step_function(model, iterator)
843
844 data = next(iterator)
--> 845 outputs = model.distribute_strategy.run(run_step, args=(data,))
846 outputs = reduce_per_replica(
847 outputs, self.distribute_strategy, reduction='first')
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in run(***failed resolving arguments***)
1283 fn = autograph.tf_convert(
1284 fn, autograph_ctx.control_status_ctx(), convert_by_default=False)
-> 1285 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
1286
1287 def reduce(self, reduce_op, value, axis):
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
2831 kwargs = {}
2832 with self._container_strategy().scope():
-> 2833 return self._call_for_each_replica(fn, args, kwargs)
2834
2835 def _call_for_each_replica(self, fn, args, kwargs):
~\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
3606 def _call_for_each_replica(self, fn, args, kwargs):
3607 with ReplicaContext(self._container_strategy(), replica_id_in_sync_group=0):
-> 3608 return fn(*args, **kwargs)
3609
3610 def _reduce_to(self, reduce_op, value, destinations, options):
~\Anaconda3\lib\site-packages\tensorflow\python\autograph\impl\api.py in wrapper(*args, **kwargs)
595 def wrapper(*args, **kwargs):
596 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
--> 597 return func(*args, **kwargs)
598
599 if inspect.isfunction(func) or inspect.ismethod(func):
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in run_step(data)
836
837 def run_step(data):
--> 838 outputs = model.train_step(data)
839 # Ensure counter is updated only if `train_step` succeeds.
840 with ops.control_dependencies(_minimum_control_deps(outputs)):
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in train_step(self, data)
793 # Run forward pass.
794 with backprop.GradientTape() as tape:
--> 795 y_pred = self(x, training=True)
796 loss = self.compiled_loss(
797 y, y_pred, sample_weight, regularization_losses=self.losses)
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1028 with autocast_variable.enable_auto_cast_variables(
1029 self._compute_dtype_object):
-> 1030 outputs = call_fn(inputs, *args, **kwargs)
1031
1032 if self._activity_regularizer:
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\sequential.py in call(self, inputs, training, mask)
378 if not self.built:
379 self._init_graph_network(self.inputs, self.outputs)
--> 380 return super(Sequential, self).call(inputs, training=training, mask=mask)
381
382 outputs = inputs # handle the corner case where self.layers is empty
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py in call(self, inputs, training, mask)
418 a list of tensors if there are more than one outputs.
419 """
--> 420 return self._run_internal_graph(
421 inputs, training=training, mask=mask)
422
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py in _run_internal_graph(self, inputs, training, mask)
554
555 args, kwargs = node.map_arguments(tensor_dict)
--> 556 outputs = node.layer(*args, **kwargs)
557
558 # Update tensor_dict.
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1028 with autocast_variable.enable_auto_cast_variables(
1029 self._compute_dtype_object):
-> 1030 outputs = call_fn(inputs, *args, **kwargs)
1031
1032 if self._activity_regularizer:
~\Anaconda3\lib\site-packages\pennylane\qnn\keras.py in call(self, inputs)
299 reconstructor = []
300 for x in tf.unstack(inputs):
--> 301 reconstructor.append(self.call(x))
302 return tf.stack(reconstructor)
303
~\Anaconda3\lib\site-packages\pennylane\qnn\keras.py in call(self, inputs)
302 return tf.stack(reconstructor)
303
--> 304 return self._evaluate_qnode(inputs)
305
306 def _evaluate_qnode(self, x):
~\Anaconda3\lib\site-packages\pennylane\qnn\keras.py in _evaluate_qnode(self, x)
317 **{k: 1.0 * w for k, w in self.qnode_weights.items()},
318 }
--> 319 return self.qnode(**kwargs)
320
321 def compute_output_shape(self, input_shape):
~\Anaconda3\lib\site-packages\pennylane\qnode.py in __call__(self, *args, **kwargs)
564
565 # construct the tape
--> 566 self.construct(args, kwargs)
567
568 cache = self.execute_kwargs.get("cache", False)
~\Anaconda3\lib\site-packages\pennylane\qnode.py in construct(self, args, kwargs)
481
482 with self.tape:
--> 483 self._qfunc_output = self.func(*args, **kwargs)
484 self._tape._qfunc_output = self._qfunc_output
485
<ipython-input-117-45acf92435fe> in qnode(inputs, weights)
4 def qnode(inputs, weights):
5
----> 6 qml.templates.AmplitudeEmbedding(inputs, wires=range(n_qubits), pad_with = 0., normalize = True)
7 qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits), rotation = qml.RY)
8
~\Anaconda3\lib\site-packages\pennylane\templates\embeddings\amplitude.py in __init__(self, features, wires, pad_with, normalize, do_queue, id)
127 self.pad_with = pad_with
128 self.normalize = normalize
--> 129 features = self._preprocess(features, wires, pad_with, normalize)
130 super().__init__(features, wires=wires, do_queue=do_queue, id=id)
131
~\Anaconda3\lib\site-packages\pennylane\templates\embeddings\amplitude.py in _preprocess(features, wires, pad_with, normalize)
206 padding = [pad_with] * (2 ** len(wires) - n_features)
207 if (
--> 208 hasattr(feature_set, "device") and feature_set.device.type == "cuda"
209 ): # pragma: no cover
210 ## Torch tensor, send to same GPU
AttributeError: 'str' object has no attribute 'type'
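If I am reading the traceback correctly, the failure happens inside AmplitudeEmbedding._preprocess, and only in the padding branch: the feature_set.device.type == "cuda" check (which looks like a Torch-tensor check) runs only when padding is needed, which would explain why 6 qubits (no padding) work and 8 do not. A TensorFlow tensor's .device attribute is a plain string, so .device.type raises the AttributeError. Calling the QNode directly with a TF tensor seems to reproduce it; this is just my guess at a minimal repro:

import pennylane as qml
import numpy as np
import tensorflow as tf

n_qubits = 8
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def circuit(features):
    qml.templates.AmplitudeEmbedding(features, wires=range(n_qubits), pad_with=0., normalize=True)
    return qml.expval(qml.PauliZ(0))

circuit(np.random.rand(64))               # fine
circuit(tf.constant(np.random.rand(64)))  # AttributeError: 'str' object has no attribute 'type'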
Any help would be appreciated.
Thanks