QNSPSA error using a dataset

Hello PennyLane Community!

I’m here to gather insights, information and, why not, code examples.

During my research, I became interested in reproducing the paper “Variational quantum algorithm for unconstrained black box binary optimization: Application to feature selection” by Christa Zoufal et al.

My first approach was to use the SPSA optimizer with a simple cost function, \sum_i(\hat{y}_i - y_i)^2. This approach works with amplitude embedding and the custom ansatz from the paper. The paper itself uses the QNSPSA optimizer, whose behaviour and implementation are quite different. I used it directly on the ansatz because it requires a cost that is a circuit returning qml.expval() to run.
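To make that distinction concrete, here is a minimal sketch (the two-qubit toy circuit and the numbers are made up for illustration; only the optimizer APIs are PennyLane's):

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def model(weights):
    # toy two-qubit ansatz
    qml.RY(weights[0], wires=0)
    qml.CNOT(wires=[0, 1])
    qml.RY(weights[1], wires=1)
    return qml.expval(qml.PauliZ(0))

weights = np.array([0.1, 0.2], requires_grad=True)

# SPSA accepts any scalar classical cost built from QNode outputs...
spsa = qml.SPSAOptimizer(maxiter=50)
weights = spsa.step(lambda w: (model(w) - 1.0) ** 2, weights)

# ...whereas QNSPSA must be handed the QNode itself (it builds extra overlap
# circuits from it to estimate the metric tensor), so the cost has to be a
# circuit returning qml.expval().
qnspsa = qml.QNSPSAOptimizer(stepsize=1e-2)
weights, loss = qnspsa.step_and_cost(model, weights)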

I used the dataset German Credit Risk, which is available on Kaggle.

Here is my code:

# Modules' importation
import pennylane as qml 
from pennylane import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

# variables 
num_qubits = 8 
dev = qml.device('lightning.qubit', wires=num_qubits, shots=10000)
theta = np.array(list(range(4*num_qubits)))/(2*num_qubits)
# Data importation 
data = pd.read_csv('german.data', sep=' ', header=None)  # german.data ships without a header row
# data processing 
enc = OneHotEncoder(handle_unknown='ignore')
X = data.iloc[:,:-1]
y = data.iloc[:,-1]
X_cat = data.select_dtypes(include=['object'])
enc.fit(X_cat)
# rename columns 
names = list()
for i in enc.categories_:
    names.extend(i)
# apply the one-hot encoding 
X_transform = pd.DataFrame(enc.transform(X_cat).toarray(), columns=names)
data_result = pd.concat([data.select_dtypes(exclude=['object']), X_transform], axis=1)

# basic functions 
def amplitudes(f=None, num_qubits=None):
    qml.AmplitudeEmbedding(features=f, pad_with=0., wires=range(num_qubits), normalize=True)

def variational_classifier(weights, x, num_qubits, depth, bias):
    '''
    Build the parametrized circuit with weights, x and bias term
    Args:
        - weights: rotation angles 
        - bias: classical term to add more freedom to the VQA
        - x: input vector/data 
    Returns: 
        - parametrized circuit with a bias term 
    '''
    return circuit(weights, x, num_qubits, depth) + bias

def square_loss(labels, predictions):
    '''
    Compute the cost function
    Args:
        - labels: Ground truth
        - predictions: Predicted values 
    Returns: 
        - Mean of the square error between labels and predictions = model's error 
    '''
    
    # We use a call to qml.math.stack to allow subtracting the arrays directly
    return np.mean((labels - qml.math.stack(predictions)) ** 2)

def accuracy(labels, predictions):
    '''
    Compute the accuracy of the model
    Args:
        - labels: Ground truth
        - predictions: Predicted values 
    Returns: 
        - accuracy
    '''
    acc = sum(abs(l - p) < 1e-5 for l, p in zip(labels, predictions))
    acc = acc / len(labels)
    return acc

@qml.qnode(dev)
def ansatz_2(theta:list, num_qubits=8, depth=1):
    '''
    Layered ansatz: an RY rotation layer, a CNOT ladder, and a second RY
    rotation layer, repeated `depth` times (2 * num_qubits angles per layer).
    '''
    step = 0
    for _ in range(depth):
        for i in range(num_qubits):
            qml.RY(theta[i + step], wires=i)
        for i in range(num_qubits - 1):
            qml.CNOT([i, i + 1])
        for i in range(num_qubits):
            qml.RY(theta[i + step + num_qubits], wires=i)
        step += 2 * num_qubits
        
    return qml.expval(qml.PauliZ(0))
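
# NOTE: the training loop below calls a QNode named `circuit`, which was
# missing from the original snippet. A plausible reconstruction, combining
# the amplitude embedding with the layered ansatz, is:
@qml.qnode(dev)
def circuit(weights, x, num_qubits, depth):
    amplitudes(f=x, num_qubits=num_qubits)
    step = 0
    for _ in range(depth):
        for i in range(num_qubits):
            qml.RY(weights[i + step], wires=i)
        for i in range(num_qubits - 1):
            qml.CNOT([i, i + 1])
        for i in range(num_qubits):
            qml.RY(weights[i + step + num_qubits], wires=i)
        step += 2 * num_qubits
    return qml.expval(qml.PauliZ(0))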

# Run the algorithm
Y_ = y * 2 - 3  # map the labels {1, 2} to {-1, +1}
X_ = data_result

depth = 1
batch_size = 5*depth 
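
# NOTE: `opt` was not defined in the original snippet. Since the traceback
# below points into pennylane/optimize/qnspsa.py, a QNSPSAOptimizer is assumed:
opt = qml.QNSPSAOptimizer(stepsize=1e-2)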

weights_init = 0.01 * np.random.randn(2*depth*num_qubits, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)

weights = weights_init
bias = bias_init

cost_saved = []
for i in range(100):
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_), (batch_size,))
    X_batch = X_.to_numpy()[batch_index]
    Y_batch = Y_.to_numpy()[batch_index]
    
    params = (weights, X_batch, num_qubits, depth)
    params, loss = opt.step_and_cost(circuit, params)
    if i % 10 == 0:
        print(f"Step {i}: cost = {loss:.4f}")

I got this error message:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[31], line 26
     23 Y_batch = Y_[batch_index]
     25 params = (weights, X_batch, num_qubits, depth)
---> 26 params, loss = opt.step_and_cost(circuit, params)
     27 if i % 10 == 0:
     28     print(f"Step {i}: cost = {loss:.4f}")

File ~/Concordia/lib/python3.10/site-packages/pennylane/optimize/qnspsa.py:185, in QNSPSAOptimizer.step_and_cost(self, cost, *args, **kwargs)
    172 def step_and_cost(self, cost, *args, **kwargs):
    173     r"""Update trainable parameters with one step of the optimizer and return
    174     the corresponding objective function value after the step.
    175 
   (...)
    183         function output prior to the step
    184     """
--> 185     params_next = self._step_core(cost, args, kwargs)
    187     if not self.blocking:
    188         loss_curr = cost(*args, **kwargs)

File ~/Concordia/lib/python3.10/site-packages/pennylane/optimize/qnspsa.py:215, in QNSPSAOptimizer._step_core(self, cost, args, kwargs)
    213 grad_tapes, grad_dirs = self._get_spsa_grad_tapes(cost, args, kwargs)
    214 # metric_tapes contains 4 tapes for tensor estimation
--> 215 metric_tapes, tensor_dirs = self._get_tensor_tapes(cost, args, kwargs)
    216 all_grad_tapes += grad_tapes
    217 all_metric_tapes += metric_tapes

File ~/Concordia/lib/python3.10/site-packages/pennylane/optimize/qnspsa.py:408, in QNSPSAOptimizer._get_tensor_tapes(self, cost, args, kwargs)
    406     args_list[2][index] = arg + self.finite_diff_step * (-dir1 + dir2)
    407     args_list[3][index] = arg - self.finite_diff_step * dir1
--> 408 dir_vecs = (np.concatenate(dir1_list), np.concatenate(dir2_list))
    409 tapes = [
    410     self._get_overlap_tape(cost, args, args_finite_diff, kwargs)
    411     for args_finite_diff in args_list
    412 ]
    414 return tapes, dir_vecs

File ~/Concordia/lib/python3.10/site-packages/pennylane/numpy/wrapper.py:117, in tensor_wrapper.<locals>._wrapped(*args, **kwargs)
    114         tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])
    116 # evaluate the original object
--> 117 res = obj(*args, **kwargs)
    119 if isinstance(res, _np.ndarray):
    120     # only if the output of the object is a ndarray,
    121     # then convert to a PennyLane tensor
    122     res = tensor(res, **tensor_kwargs)

File ~/Concordia/lib/python3.10/site-packages/autograd/numpy/numpy_wrapper.py:38, in <lambda>(arr_list, axis)
     35 @primitive
     36 def concatenate_args(axis, *args):
     37     return _np.concatenate(args, axis).view(ndarray)
---> 38 concatenate = lambda arr_list, axis=0 : concatenate_args(axis, *arr_list)
     39 vstack = row_stack = lambda tup: concatenate([atleast_2d(_m) for _m in tup], axis=0)
     40 def hstack(tup):

File ~/Concordia/lib/python3.10/site-packages/autograd/tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
     46     return new_box(ans, trace, node)
     47 else:
---> 48     return f_raw(*args, **kwargs)

File ~/Concordia/lib/python3.10/site-packages/autograd/numpy/numpy_wrapper.py:37, in concatenate_args(axis, *args)
     35 @primitive
     36 def concatenate_args(axis, *args):
---> 37     return _np.concatenate(args, axis).view(ndarray)

ValueError: need at least one array to concatenate

It’s probably due to my lack of knowledge of QNSPSAOptimizer. So, if you have any code or tutorials beyond the PennyLane documentation and demos (which I have already used), it would be helpful.

And finally, I am using PennyLane version 0.33.0.

Best

Hey @Christophe_Pere! Welcome to the forum :slight_smile:

Can you substitute the Kaggle dataset with some dummy data? That will help me be able to just copy-paste your code and try to replicate the issue :slight_smile:
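For instance, something along these lines would do (a sketch using sklearn's make_classification as a stand-in for the Kaggle CSV; the column handling is simplified):

from sklearn.datasets import make_classification
import pandas as pd

# dummy stand-in for the German Credit Risk data: 1000 samples, 52 features
X_dummy, y_dummy = make_classification(n_samples=1000, n_features=52, n_classes=2, random_state=0)
data_result = pd.DataFrame(X_dummy)
y = pd.Series(y_dummy)  # labels are {0, 1} here, so the label mapping becomes y * 2 - 1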

Hi @isaacdevlugt,

Sorry for the long delay; it was a hard week. I built a notebook containing both the SPSA approach and the QNSPSA approach, using data generated by make_classification from sklearn.

You can find it here: VarQFS/QNSPSA-PennyLane.ipynb at main · Christophe-pere/VarQFS · GitHub

The notebook should be self-contained and reproduces the same error. I hope it will help you.

Hi there,

I made progress: I was able to run QNSPSA with the ansatz ansatz_4 from the notebook. Now I have to understand how to make it work with the data preparation.

Best,

Hey @Christophe_Pere!

Apologies for the slow response. Were you able to solve your problem? Is there anything else I can help with?

Hi @isaacdevlugt,
Indeed, I was able to run QNSPSA and QNG with my ansatz, but I’m now looking for a way to incorporate data in batches during the optimization process. It’s still unclear to me: when I build a circuit with AmplitudeEmbedding, I get a dimension error.

Hey @Christophe_Pere,

Could you attach some minimal code so that I can take a look and try to help?

Hi,

The code is in the first post. I was able to run QNSPSA with just the ansatz, but when I tried to use the circuit containing AmplitudeEmbedding together with the ansatz, I obtained the concatenation error described in the first post.
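For concreteness, here is a minimal sketch of the two situations (toy circuits for illustration; passing a single, non-trainable sample as a keyword argument mirrors the fix that appears later in this thread):

import pennylane as qml
from pennylane import numpy as np

num_qubits = 4
dev = qml.device("default.qubit", wires=num_qubits)

@qml.qnode(dev)
def ansatz_only(params):
    # trainable rotations only, no data loading
    for i in range(num_qubits):
        qml.RY(params[i], wires=i)
    return qml.expval(qml.PauliZ(0))

@qml.qnode(dev)
def with_embedding(params, x=None):
    # data loading followed by trainable rotations
    qml.AngleEmbedding(features=x, wires=range(num_qubits))
    for i in range(num_qubits):
        qml.RY(params[i], wires=i)
    return qml.expval(qml.PauliZ(0))

opt = qml.QNSPSAOptimizer(stepsize=1e-2)
params = np.random.random(num_qubits)  # trainable by default in pennylane.numpy

# This works: the only positional argument is the trainable parameter vector.
params, loss = opt.step_and_cost(ansatz_only, params)

# Passing one sample as a non-trainable keyword argument keeps it out of the
# set of arguments the optimizer perturbs.
x = np.array(np.random.random(num_qubits), requires_grad=False)
params, loss = opt.step_and_cost(with_embedding, params, x=x)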

Hi @isaacdevlugt,

I made some progress using the threads Training with QNG optimizer on circuit with data argument and Accelerating VQEs with quantum natural gradient.
I changed my code to:

import pennylane as qml 
from pennylane import numpy as np
from sklearn import datasets 

data = datasets.make_classification(n_samples=1000, n_features=52, n_classes=2) 
num_qubits = 8
dev = qml.device("default.qubit", wires=num_qubits)

def ansatz_3(params, X, depth=2):
    '''
    Amplitude embedding of X followed by the layered RY/CNOT ansatz.
    '''
    qml.AmplitudeEmbedding(features=X, pad_with=0., wires=range(num_qubits), normalize=True)
    step = 0
    for _ in range(depth):
        for i in range(num_qubits):
            qml.RY(params[i + step], wires=i)
        for i in range(num_qubits - 1):
            qml.CNOT([i, i + 1])
        for i in range(num_qubits):
            qml.RY(params[i + step + num_qubits], wires=i)
        step += 2 * num_qubits

coeffs = [1 for i in range(num_qubits)]

obs = [qml.PauliZ(i) for i in range(num_qubits)]

H = qml.Hamiltonian(coeffs, obs)

@qml.qnode(dev, interface="autograd")
def cost_fn(params, X):
    ansatz_3(params, X)
    return qml.expval(H)

init_params = np.random.random(32)  # 2 * depth * num_qubits trainable angles
max_iterations = 500
step_size = 0.5
conv_tol = 1e-06

opt = qml.QNGOptimizer(step_size, lam=0.001, approx="block-diag")

depth = 2
batch_size = 5*depth #5

X_ = data[0]
Y_ = data[1]
Y_ = Y_ * 2 - 1  # map the labels {0, 1} to {-1, +1}


params = init_params
prev_energy = cost_fn(params, X_[:batch_size])
qngd_cost = []

for n in range(max_iterations):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_), (batch_size,))
    X_batch = np.array(X_[batch_index], requires_grad=False)
    Y_batch = Y_[batch_index]

    
    params, prev_energy = opt.step_and_cost(cost_fn, params, X_batch)
    qngd_cost.append(prev_energy)

    energy = cost_fn(params, X_batch)
    conv = np.abs(energy - prev_energy)

    if n % 4 == 0:
        print(
            "Iteration = {:},  Energy = {:.8f} Ha".format(n, energy)
        )

    if conv <= conv_tol:
        break

But I got this new error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[6], line 28
     24 X_batch = np.array(X_[batch_index], requires_grad=False)
     25 Y_batch = Y_[batch_index]
---> 28 params, prev_energy = opt.step_and_cost(cost_fn, params, X_batch)
     29 qngd_cost.append(prev_energy)
     31 energy = cost_fn(params, X_batch)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/optimize/qng.py:192, in QNGOptimizer.step_and_cost(self, qnode, grad_fn, recompute_tensor, metric_tensor_fn, *args, **kwargs)
    189 if metric_tensor_fn is None:
    190     metric_tensor_fn = qml.metric_tensor(qnode, approx=self.approx)
--> 192 _metric_tensor = metric_tensor_fn(*args, **kwargs)
    193 # Reshape metric tensor to be square
    194 shape = qml.math.shape(_metric_tensor)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/workflow/qnode.py:1040, in QNode.__call__(self, *args, **kwargs)
   1035     if hybrid:
   1036         argnums = full_transform_program[-1]._kwargs.pop(
   1037             "argnums", None
   1038         )  # pylint: disable=protected-access
-> 1040         full_transform_program._set_all_classical_jacobians(
   1041             self, args, kwargs, argnums
   1042         )  # pylint: disable=protected-access
   1043         full_transform_program._set_all_argnums(
   1044             self, args, kwargs, argnums
   1045         )  # pylint: disable=protected-access
   1047 # pylint: disable=unexpected-keyword-arg

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:420, in TransformProgram._set_all_classical_jacobians(self, qnode, args, kwargs, argnums)
    416     raise qml.QuantumFunctionError(
    417         "argnum does not work with the Jax interface. You should use argnums instead."
    418     )
    419 sub_program = TransformProgram(self[0:index])
--> 420 classical_jacobian = jacobian(
    421     classical_preprocessing, sub_program, argnums, *args, **kwargs
    422 )
    423 qnode.construct(args, kwargs)
    424 tapes, _ = sub_program((qnode.tape,))

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:376, in TransformProgram._set_all_classical_jacobians.<locals>.jacobian(classical_function, program, argnums, *args, **kwargs)
    373 classical_function = partial(classical_function, program)
    375 if qnode.interface == "autograd":
--> 376     jac = qml.jacobian(classical_function, argnum=argnums)(*args, **kwargs)
    378 if qnode.interface == "tf":
    379     import tensorflow as tf  # pylint: disable=import-outside-toplevel

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/_grad.py:455, in jacobian.<locals>._jacobian_function(*args, **kwargs)
    449 if not _argnum:
    450     warnings.warn(
    451         "Attempted to differentiate a function with no trainable parameters. "
    452         "If this is unintended, please add trainable parameters via the "
    453         "'requires_grad' attribute or 'argnum' keyword."
    454     )
--> 455 jac = tuple(_jacobian(func, arg)(*args, **kwargs) for arg in _argnum)
    457 return jac[0] if unpack else jac

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/_grad.py:455, in <genexpr>(.0)
    449 if not _argnum:
    450     warnings.warn(
    451         "Attempted to differentiate a function with no trainable parameters. "
    452         "If this is unintended, please add trainable parameters via the "
    453         "'requires_grad' attribute or 'argnum' keyword."
    454     )
--> 455 jac = tuple(_jacobian(func, arg)(*args, **kwargs) for arg in _argnum)
    457 return jac[0] if unpack else jac

File ~/PinQ2_py/lib/python3.9/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File ~/PinQ2_py/lib/python3.9/site-packages/autograd/differential_operators.py:60, in jacobian(fun, x)
     50 @unary_to_nary
     51 def jacobian(fun, x):
     52     """
     53     Returns a function which computes the Jacobian of `fun` with respect to
     54     positional argument number `argnum`, which must be a scalar or array. Unlike
   (...)
     58     (out1, out2, ...) then the Jacobian has shape (out1, out2, ..., in1, in2, ...).
     59     """
---> 60     vjp, ans = _make_vjp(fun, x)
     61     ans_vspace = vspace(ans)
     62     jacobian_shape = ans_vspace.shape + vspace(x).shape

File ~/PinQ2_py/lib/python3.9/site-packages/autograd/core.py:10, in make_vjp(fun, x)
      8 def make_vjp(fun, x):
      9     start_node = VJPNode.new_root()
---> 10     end_value, end_node =  trace(start_node, fun, x)
     11     if end_node is None:
     12         def vjp(g): return vspace(x).zeros()

File ~/PinQ2_py/lib/python3.9/site-packages/autograd/tracer.py:10, in trace(start_node, fun, x)
      8 with trace_stack.new_trace() as t:
      9     start_box = new_box(x, t, start_node)
---> 10     end_box = fun(start_box)
     11     if isbox(end_box) and end_box._trace == start_box._trace:
     12         return end_box._value, end_box._node

File ~/PinQ2_py/lib/python3.9/site-packages/autograd/wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
     13 else:
     14     subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:355, in TransformProgram._set_all_classical_jacobians.<locals>.classical_preprocessing(program, *args, **kwargs)
    353 qnode.construct(args, kwargs)
    354 tape = qnode.qtape
--> 355 tapes, _ = program((tape,))
    356 res = tuple(qml.math.stack(tape.get_parameters(trainable_only=True)) for tape in tapes)
    357 if len(tapes) == 1:

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:477, in TransformProgram.__call__(self, tapes)
    475 if self._argnums is not None and self._argnums[i] is not None:
    476     tape.trainable_params = self._argnums[i][j]
--> 477 new_tapes, fn = transform(tape, *targs, **tkwargs)
    478 execution_tapes.extend(new_tapes)
    480 fns.append(fn)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/gradients/metric_tensor.py:82, in _expand_metric_tensor(tape, argnum, approx, allow_nonunitary, aux_wire, device_wires)
     80 if not allow_nonunitary and approx is None:
     81     return [qml.transforms.expand_nonunitary_gen(tape)], lambda x: x[0]
---> 82 return [qml.transforms.expand_multipar(tape)], lambda x: x[0]

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/tape_expand.py:102, in create_expand_fn.<locals>.expand_fn(tape, depth, **kwargs)
    100     tape = tape.expand(depth=depth)
    101 elif not all(stop_at(op) for op in tape.operations):
--> 102     tape = tape.expand(depth=depth, stop_at=stop_at)
    103 else:
    104     return tape

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/tape/qscript.py:912, in QuantumScript.expand(self, depth, stop_at, expand_measurements)
    867 def expand(self, depth=1, stop_at=None, expand_measurements=False):
    868     """Expand all operations to a specific depth.
    869 
    870     Args:
   (...)
    910     RY(0.2, wires=['a'])]
    911     """
--> 912     new_script = qml.tape.tape.expand_tape(
    913         self, depth=depth, stop_at=stop_at, expand_measurements=expand_measurements
    914     )
    915     new_script._update()
    916     return new_script

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/tape/tape.py:213, in expand_tape(tape, depth, stop_at, expand_measurements)
    210         continue
    212 # recursively expand out the newly created tape
--> 213 expanded_tape = expand_tape(obj, stop_at=stop_at, depth=depth - 1)
    215 new_ops.extend(expanded_tape.operations)
    216 new_measurements.extend(expanded_tape.measurements)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/tape/tape.py:213, in expand_tape(tape, depth, stop_at, expand_measurements)
    210         continue
    212 # recursively expand out the newly created tape
--> 213 expanded_tape = expand_tape(obj, stop_at=stop_at, depth=depth - 1)
    215 new_ops.extend(expanded_tape.operations)
    216 new_measurements.extend(expanded_tape.measurements)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/tape/tape.py:198, in expand_tape(tape, depth, stop_at, expand_measurements)
    196 if obj.has_decomposition:
    197     with QueuingManager.stop_recording():
--> 198         obj = QuantumScript(obj.decomposition(), _update=False)
    199 else:
    200     new_queue.append(obj)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/operation.py:1292, in Operator.decomposition(self)
   1280 def decomposition(self):
   1281     r"""Representation of the operator as a product of other operators.
   1282 
   1283     .. math:: O = O_1 O_2 \dots O_n
   (...)
   1290         list[Operator]: decomposition of the operator
   1291     """
-> 1292     return self.compute_decomposition(
   1293         *self.parameters, wires=self.wires, **self.hyperparameters
   1294     )

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/templates/state_preparations/mottonen.py:346, in MottonenStatePreparation.compute_decomposition(state_vector, wires)
    321 r"""Representation of the operator as a product of other operators.
    322 
    323 .. math:: O = O_1 O_2 \dots O_n.
   (...)
    343 CNOT(wires=['a', 'b'])]
    344 """
    345 if len(qml.math.shape(state_vector)) > 1:
--> 346     raise ValueError(
    347         "Broadcasting with MottonenStatePreparation is not supported. Please use the "
    348         "qml.transforms.broadcast_expand transform to use broadcasting with "
    349         "MottonenStatePreparation."
    350     )
    352 a = qml.math.abs(state_vector)
    353 omega = qml.math.angle(state_vector)

ValueError: Broadcasting with MottonenStatePreparation is not supported. Please use the qml.transforms.broadcast_expand transform to use broadcasting with MottonenStatePreparation.

Looking at the forum, the error seems to be due to the fact that qml.MottonenStatePreparation does not implement broadcasting: Parameter broadcast bug

But how do I fix it? I don’t use qml.MottonenStatePreparation directly; is it used inside qml.AmplitudeEmbedding?

Update:
When I changed AmplitudeEmbedding to AngleEmbedding and reduced the dataset to 8 features to match the 8 qubits, the broadcasting error disappeared, but I got a new one :joy:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[38], line 29
     25 X_batch = np.array(X_[batch_index], requires_grad=False)
     26 Y_batch = Y_[batch_index]
---> 29 params, prev_energy = opt.step_and_cost(cost_fn, params, X_batch)
     30 qngd_cost.append(prev_energy)
     32 energy = cost_fn(params, X_batch)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/optimize/qng.py:192, in QNGOptimizer.step_and_cost(self, qnode, grad_fn, recompute_tensor, metric_tensor_fn, *args, **kwargs)
    189 if metric_tensor_fn is None:
    190     metric_tensor_fn = qml.metric_tensor(qnode, approx=self.approx)
--> 192 _metric_tensor = metric_tensor_fn(*args, **kwargs)
    193 # Reshape metric tensor to be square
    194 shape = qml.math.shape(_metric_tensor)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/workflow/qnode.py:1048, in QNode.__call__(self, *args, **kwargs)
   1043         full_transform_program._set_all_argnums(
   1044             self, args, kwargs, argnums
   1045         )  # pylint: disable=protected-access
   1047 # pylint: disable=unexpected-keyword-arg
-> 1048 res = qml.execute(
   1049     (self._tape,),
   1050     device=self.device,
   1051     gradient_fn=self.gradient_fn,
   1052     interface=self.interface,
   1053     transform_program=full_transform_program,
   1054     config=config,
   1055     gradient_kwargs=self.gradient_kwargs,
   1056     override_shots=override_shots,
   1057     **self.execute_kwargs,
   1058 )
   1060 res = res[0]
   1062 # convert result to the interface in case the qfunc has no parameters

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/workflow/execution.py:685, in execute(tapes, device, gradient_fn, interface, transform_program, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp)
    683 if no_interface_boundary_required:
    684     results = inner_execute(tapes)
--> 685     return post_processing(results)
    687 _grad_on_execution = False
    689 if (
    690     device_vjp
    691     and "lightning" in getattr(device, "short_name", "")
    692     and interface in jpc_interfaces
    693 ):

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:86, in _apply_postprocessing_stack(results, postprocessing_stack)
     63 """Applies the postprocessing and cotransform postprocessing functions in a Last-In-First-Out LIFO manner.
     64 
     65 Args:
   (...)
     83 
     84 """
     85 for postprocessing in reversed(postprocessing_stack):
---> 86     results = postprocessing(results)
     87 return results

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:56, in _batch_postprocessing(results, individual_fns, slices)
     30 def _batch_postprocessing(
     31     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     32 ) -> ResultBatch:
     33     """Broadcast individual post processing functions onto their respective tapes.
     34 
     35     Args:
   (...)
     54 
     55     """
---> 56     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:56, in <genexpr>(.0)
     30 def _batch_postprocessing(
     31     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     32 ) -> ResultBatch:
     33     """Broadcast individual post processing functions onto their respective tapes.
     34 
     35     Args:
   (...)
     54 
     55     """
---> 56     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/gradients/metric_tensor.py:504, in _metric_tensor_cov_matrix.<locals>.processing_fn(probs)
    502 scale = qml.math.convert_like(qml.math.outer(coeffs, coeffs), p)
    503 scale = qml.math.cast_like(scale, p)
--> 504 g = scale * qml.math.cov_matrix(p, obs, wires=tape.wires, diag_approx=diag_approx)
    505 for i, in_argnum in enumerate(params_in_argnum):
    506     # fill in rows and columns of zeros where a parameter was not in argnum
    507     if not in_argnum:

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/math/quantum.py:102, in cov_matrix(prob, obs, wires, diag_approx)
    100 eigvals = cast(o.eigvals(), dtype=float64)
    101 w = o.wires.labels if wires is None else wires.indices(o.wires)
--> 102 p = marginal_prob(prob, w)
    104 res = dot(eigvals**2, p) - (dot(eigvals, p)) ** 2
    105 variances.append(res)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/math/quantum.py:169, in marginal_prob(prob, axis)
    166     return prob
    168 inactive_wires = tuple(set(range(num_wires)) - set(axis))
--> 169 prob = np.reshape(prob, [2] * num_wires)
    170 prob = np.sum(prob, axis=inactive_wires)
    171 return np.flatten(prob)

File ~/PinQ2_py/lib/python3.9/site-packages/autoray/autoray.py:79, in do(fn, like, *args, **kwargs)
     30 """Do function named ``fn`` on ``(*args, **kwargs)``, peforming single
     31 dispatch to retrieve ``fn`` based on whichever library defines the class of
     32 the ``args[0]``, or the ``like`` keyword argument if specified.
   (...)
     76     <tf.Tensor: id=91, shape=(3, 3), dtype=float32>
     77 """
     78 backend = choose_backend(fn, *args, like=like, **kwargs)
---> 79 return get_lib_fn(backend, fn)(*args, **kwargs)

File <__array_function__ internals>:180, in reshape(*args, **kwargs)

File ~/PinQ2_py/lib/python3.9/site-packages/numpy/core/fromnumeric.py:298, in reshape(a, newshape, order)
    198 @array_function_dispatch(_reshape_dispatcher)
    199 def reshape(a, newshape, order='C'):
    200     """
    201     Gives a new shape to an array without changing its data.
    202 
   (...)
    296            [5, 6]])
    297     """
--> 298     return _wrapfunc(a, 'reshape', newshape, order=order)

File ~/PinQ2_py/lib/python3.9/site-packages/numpy/core/fromnumeric.py:57, in _wrapfunc(obj, method, *args, **kwds)
     54     return _wrapit(obj, method, *args, **kwds)
     56 try:
---> 57     return bound(*args, **kwds)
     58 except TypeError:
     59     # A TypeError occurs if the object does have such a method in its
     60     # class, but its signature is not identical to that of NumPy's. This
   (...)
     64     # Call _wrapit from within the except clause to ensure a potential
     65     # exception has a traceback chain.
     66     return _wrapit(obj, method, *args, **kwds)

ValueError: cannot reshape array of size 2560 into shape (2,2,2,2,2,2,2,2,2,2,2)

Hey @Christophe_Pere,

Can you update me on the code you’re using so that I can try to replicate the issue?

Hi @isaacdevlugt,
Sure. I tested with QNGOptimizer and QNSPSAOptimizer and got multiple errors. I also changed the interface to see if it helps in some way.

Code:

import pennylane as qml 
from pennylane import numpy as np
from sklearn import datasets 
# dataset generation
data = datasets.make_classification(n_samples=1000, n_features=52, n_classes=2) 
# setup parameters and simulator 
num_qubits = 8
dev = qml.device("default.qubit", wires=num_qubits)
# ansatz definition 
def ansatz_3(params, X, depth=2):
    '''
    Angle embedding of X followed by the layered RY/CNOT ansatz.
    '''
    qml.AngleEmbedding(features=X, wires=range(num_qubits))
    step = 0
    for _ in range(depth):
        for i in range(num_qubits):
            qml.RY(params[i + step], wires=i)
        for i in range(num_qubits - 1):
            qml.CNOT([i, i + 1])
        for i in range(num_qubits):
            qml.RY(params[i + step + num_qubits], wires=i)
        step += 2 * num_qubits

# Build a Hamiltonian from Z observables 
coeffs = [1 for i in range(num_qubits)]
obs = [qml.PauliZ(i) for i in range(num_qubits)]
H = qml.Hamiltonian(coeffs, obs)

@qml.qnode(dev, interface='autograd')
def cost_fn(params, X):
    ansatz_3(params, X)
    return qml.expval(H)
init_params = np.random.random(32)  # 2 * depth * num_qubits trainable angles
max_iterations = 500
step_size = 0.5
conv_tol = 1e-06

opt = qml.QNGOptimizer(step_size, lam=0.001, approx="block-diag")
#opt = qml.QNSPSAOptimizer(stepsize=1e-2)

depth = 2
batch_size = 5*depth #5

X_ = data[0][:, :8]  # keep only the first 8 features to match the 8 qubits
Y_ = data[1]
Y_ = Y_ * 2 - 1  # map the labels {0, 1} to {-1, +1}


params = init_params
prev_energy = cost_fn(params, X_[:batch_size])
qngd_cost = []

for n in range(max_iterations):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_), (batch_size,))
    X_batch = np.array(X_[batch_index], requires_grad=False)
    Y_batch = Y_[batch_index]

    
    params, prev_energy = opt.step_and_cost(cost_fn, params, X_batch)
    qngd_cost.append(prev_energy)

    energy = cost_fn(params, X_batch)
    conv = np.abs(energy - prev_energy)

    if n % 4 == 0:
        print(
            "Iteration = {:},  Energy = {:.8f} Ha".format(n, energy)
        )

    if conv <= conv_tol:
        break

I got this error about reshaping, maybe similar to the broadcasting error (note that 2560 = 10 × 256 = batch_size × 2^num_qubits, so the batch dimension seems to end up inside the probability vector):

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[8], line 29
     25 X_batch = np.array(X_[batch_index], requires_grad=False)
     26 Y_batch = Y_[batch_index]
---> 29 params, prev_energy = opt.step_and_cost(cost_fn, params, X_batch)
     30 qngd_cost.append(prev_energy)
     32 energy = cost_fn(params, X_batch)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/optimize/qng.py:192, in QNGOptimizer.step_and_cost(self, qnode, grad_fn, recompute_tensor, metric_tensor_fn, *args, **kwargs)
    189 if metric_tensor_fn is None:
    190     metric_tensor_fn = qml.metric_tensor(qnode, approx=self.approx)
--> 192 _metric_tensor = metric_tensor_fn(*args, **kwargs)
    193 # Reshape metric tensor to be square
    194 shape = qml.math.shape(_metric_tensor)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/workflow/qnode.py:1048, in QNode.__call__(self, *args, **kwargs)
   1043         full_transform_program._set_all_argnums(
   1044             self, args, kwargs, argnums
   1045         )  # pylint: disable=protected-access
   1047 # pylint: disable=unexpected-keyword-arg
-> 1048 res = qml.execute(
   1049     (self._tape,),
   1050     device=self.device,
   1051     gradient_fn=self.gradient_fn,
   1052     interface=self.interface,
   1053     transform_program=full_transform_program,
   1054     config=config,
   1055     gradient_kwargs=self.gradient_kwargs,
   1056     override_shots=override_shots,
   1057     **self.execute_kwargs,
   1058 )
   1060 res = res[0]
   1062 # convert result to the interface in case the qfunc has no parameters

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/workflow/execution.py:685, in execute(tapes, device, gradient_fn, interface, transform_program, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp)
    683 if no_interface_boundary_required:
    684     results = inner_execute(tapes)
--> 685     return post_processing(results)
    687 _grad_on_execution = False
    689 if (
    690     device_vjp
    691     and "lightning" in getattr(device, "short_name", "")
    692     and interface in jpc_interfaces
    693 ):

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:86, in _apply_postprocessing_stack(results, postprocessing_stack)
     63 """Applies the postprocessing and cotransform postprocessing functions in a Last-In-First-Out LIFO manner.
     64 
     65 Args:
   (...)
     83 
     84 """
     85 for postprocessing in reversed(postprocessing_stack):
---> 86     results = postprocessing(results)
     87 return results

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:56, in _batch_postprocessing(results, individual_fns, slices)
     30 def _batch_postprocessing(
     31     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     32 ) -> ResultBatch:
     33     """Broadcast individual post processing functions onto their respective tapes.
     34 
     35     Args:
   (...)
     54 
     55     """
---> 56     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/transforms/core/transform_program.py:56, in <genexpr>(.0)
     30 def _batch_postprocessing(
     31     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     32 ) -> ResultBatch:
     33     """Broadcast individual post processing functions onto their respective tapes.
     34 
     35     Args:
   (...)
     54 
     55     """
---> 56     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/gradients/metric_tensor.py:504, in _metric_tensor_cov_matrix.<locals>.processing_fn(probs)
    502 scale = qml.math.convert_like(qml.math.outer(coeffs, coeffs), p)
    503 scale = qml.math.cast_like(scale, p)
--> 504 g = scale * qml.math.cov_matrix(p, obs, wires=tape.wires, diag_approx=diag_approx)
    505 for i, in_argnum in enumerate(params_in_argnum):
    506     # fill in rows and columns of zeros where a parameter was not in argnum
    507     if not in_argnum:

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/math/quantum.py:102, in cov_matrix(prob, obs, wires, diag_approx)
    100 eigvals = cast(o.eigvals(), dtype=float64)
    101 w = o.wires.labels if wires is None else wires.indices(o.wires)
--> 102 p = marginal_prob(prob, w)
    104 res = dot(eigvals**2, p) - (dot(eigvals, p)) ** 2
    105 variances.append(res)

File ~/PinQ2_py/lib/python3.9/site-packages/pennylane/math/quantum.py:169, in marginal_prob(prob, axis)
    166     return prob
    168 inactive_wires = tuple(set(range(num_wires)) - set(axis))
--> 169 prob = np.reshape(prob, [2] * num_wires)
    170 prob = np.sum(prob, axis=inactive_wires)
    171 return np.flatten(prob)

File ~/PinQ2_py/lib/python3.9/site-packages/autoray/autoray.py:79, in do(fn, like, *args, **kwargs)
     30 """Do function named ``fn`` on ``(*args, **kwargs)``, peforming single
     31 dispatch to retrieve ``fn`` based on whichever library defines the class of
     32 the ``args[0]``, or the ``like`` keyword argument if specified.
   (...)
     76     <tf.Tensor: id=91, shape=(3, 3), dtype=float32>
     77 """
     78 backend = choose_backend(fn, *args, like=like, **kwargs)
---> 79 return get_lib_fn(backend, fn)(*args, **kwargs)

File <__array_function__ internals>:180, in reshape(*args, **kwargs)

File ~/PinQ2_py/lib/python3.9/site-packages/numpy/core/fromnumeric.py:298, in reshape(a, newshape, order)
    198 @array_function_dispatch(_reshape_dispatcher)
    199 def reshape(a, newshape, order='C'):
    200     """
    201     Gives a new shape to an array without changing its data.
    202 
   (...)
    296            [5, 6]])
    297     """
--> 298     return _wrapfunc(a, 'reshape', newshape, order=order)

File ~/PinQ2_py/lib/python3.9/site-packages/numpy/core/fromnumeric.py:57, in _wrapfunc(obj, method, *args, **kwds)
     54     return _wrapit(obj, method, *args, **kwds)
     56 try:
---> 57     return bound(*args, **kwds)
     58 except TypeError:
     59     # A TypeError occurs if the object does have such a method in its
     60     # class, but its signature is not identical to that of NumPy's. This
   (...)
     64     # Call _wrapit from within the except clause to ensure a potential
     65     # exception has a traceback chain.
     66     return _wrapit(obj, method, *args, **kwds)

ValueError: cannot reshape array of size 2560 into shape (2,2,2,2,2,2,2,2,2,2,2)

If I change the interface parameter, I get the following errors.
For interface='tf':

AttributeError: in user code:


    AttributeError: 'tensor' object has no attribute '_id'

For interface='torch':

TypeError: The inputs given to jacobian must be either a Tensor or a tuple of Tensors but the value at index 0 has type <class 'pennylane.numpy.tensor.tensor'>.

For interface='jax':

TypeError: cannot reshape array of shape (2560,) (size 2560) into shape [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] (size 2048)

And if I change the QNGOptimizer to the QNSPSAOptimizer:

opt = qml.QNSPSAOptimizer(stepsize=1e-2)

I obtained the following with interface='auto':

ValueError: operands could not be broadcast together with shapes (10,) (32,) 

I will continue to investigate.

Hey @Christophe_Pere,

No need to specify the interface anymore when using default.qubit :slight_smile:. It automatically decides what’s best to use!

I spent some time banging my head against the desk trying to figure out what was going on, and it was a combination of a couple of things :sweat_smile: :sweat_smile:

  1. Something was off about how you were selecting a batch from X_ and (correspondingly) Y_. This is what I did:
num_batches = len(X_) // batch_size

for n in range(3):

    # shuffle the data
    shuffled_indices = np.random.permutation(len(X_))
    X_ = np.array(X_[shuffled_indices])
    Y_ = np.array(Y_[shuffled_indices])

    for i in range(len(X_)):
        params = opt.step(circuit, params, X=X_[i])

    print(params)

This works with opt = qml.QNGOptimizer(step_size, lam = 0.001, approx='block-diag')

  2. With QNSPSA, I had to do this (I’m not sure why, but the tensor type of params is getting bounced to a vanilla NumPy array after one step). I will look into it!
num_batches = len(X_) // batch_size

for n in range(3):

    # shuffle the data
    shuffled_indices = np.random.permutation(len(X_))
    X_ = np.array(X_[shuffled_indices])
    Y_ = np.array(Y_[shuffled_indices])
    
    for i in range(len(X_)):
        params, cost_val = opt.step_and_cost(circuit, params, X=X_[i])
        params = np.array(params, requires_grad=True) # needed ???

    print(params)

Looks like this is indeed a bug :slight_smile:. A bug report has been made and a fix will roll out as soon as we have time to work on it!

Hi @isaacdevlugt ,

Thanks a lot for your time. I had not understood that I couldn’t pass a batch, only one sample at a time. I will test your code for both QNG and QNSPSA. Thanks also for pointing out the bug. I will try both and get you updated.

Hi @isaacdevlugt ,

With the code you provided, what I had, and some more time banging my head against the desk, I was able to run the code in a pre-alpha version and obtain results!

Output:

Iteration = 0,   Cost = -2.35790925 for bitstring 00101001
Iteration = 27,  Cost = -2.48741100 for bitstring 00101110
Iteration = 33,  Cost = -2.63781294 for bitstring 01110111
Iteration = 43,  Cost = -3.18746897 for bitstring 11100010
Iteration = 78,  Cost = -3.26080155 for bitstring 00001111
Iteration = 84,  Cost = -3.39668699 for bitstring 10101100
...

Thanks a lot. It could be interesting to write a demo on this. What do you think?

Hi @isaacdevlugt ,

I noticed a strange thing with the code. When I use default.qubit as the device, the cost returned by:

params, cost = opt.step_and_cost(circuit, params, X=X_[i])

is a float. But if I use lightning.qubit, the type of the cost is a numpy.ndarray. Why does the backend change the type of the values returned by the optimizer?
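A possible workaround (assuming the returned cost is a zero-dimensional array) is to cast the value explicitly, so the rest of the code is device-agnostic:

params, cost = opt.step_and_cost(circuit, params, X=X_[i])
cost = float(cost)  # plain Python float on both default.qubit and lightning.qubit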

It could be interesting to write a demo on this. What do you think?

Nice! If you want to write a demo about your work, you can follow the instructions here: How to submit a demo | PennyLane

Why does the backend change the type of the values returned by the optimizer?

There seems to be another bug, this time with lightning.qubit and the QNSPSA optimizer :thinking:. I’ll open another report!

Hi @isaacdevlugt ,

Yes, why not write a demo for this one? It could be helpful for others. Thanks for opening the issue; I hope it will help.

Best,


Hi @isaacdevlugt ,

I’m back with the QNSPSA optimizer and a strange new behaviour.

Everything discussed above ran successfully on a simulator (all the PennyLane simulators, in fact). This time, I tried using a real machine, IBM Quantum System One, and got an error from the optimizer:

Traceback (most recent call last):
  File "/home/ubuntu/Pennylane/qnspsa_qng_varQfs.py", line 225, in <module>
    selected_variables, energy = fit(X_new, ansatz_3, depth, batch_size, num_batches, opt, max_features)
  File "/home/ubuntu/Pennylane/qnspsa_qng_varQfs.py", line 153, in fit
    _bitstring_, cost_save = select_variables(X, ansatz, depth, batch_size, num_batches,  opt, num_qubits=features)
  File "/home/ubuntu/Pennylane/qnspsa_qng_varQfs.py", line 127, in select_variables
    params, cost = opt.step_and_cost(qnode, params, X=X_batch[i], depth=depth, num_qubits=num_qubits)
  File "/home/ubuntu/Concordia-test/lib/python3.10/site-packages/pennylane/optimize/qnspsa.py", line 191, in step_and_cost
    params_next, loss_curr = self._apply_blocking(cost, args, kwargs, params_next)
  File "/home/ubuntu/Concordia-test/lib/python3.10/site-packages/pennylane/optimize/qnspsa.py", line 441, in _apply_blocking
    program, _ = cost.device.preprocess()
AttributeError: 'IBMQCircuitRunnerDevice' object has no attribute 'preprocess'

I configured the QNode with the Qiskit Runtime service like this:

from qiskit_ibm_runtime import QiskitRuntimeService

provider = QiskitRuntimeService(channel='ibm_quantum', token='my_token')

dev = qml.device('qiskit.ibmq.sampler', wires=num_qubits, backend='ibm_quebec', shots=1000, provider=provider)

qnode = qml.QNode(ansatz, dev, diff_method="parameter-shift")

ibm_quebec is the hardware I have access to. I am trying to run the same code as previously discussed, but on a real machine.

Is there something I missed?

Best,

C.

@Christophe_Pere Were you able to find a solution for the QNSPSA optimizer? My query is already posted on the PennyLane help forum. Since last week, I have been facing the same issue when using amplitude encoding with QNSPSA. It gives the error ValueError: Broadcasting with MottonenStatePreparation is not supported. Please use the qml.transforms.broadcast_expand transform to use broadcasting with MottonenStatePreparation.

@Christophe_Pere If anything works at your end with QNSPSA, please help.