ValueError: need at least one array to stack

I'm using pennylane==0.36.0 to train my neural network and ran into this error. My training code looks like this:

def TrainQKSAN():
    opt = qml.NesterovMomentumOptimizer(0.2)
    batch_size = 15
    weights = paras
    #bias = np.array(0.0, requires_grad=True)
    his_acc_val = []
    his_acc_train = []
    his_cost = []

    for it in range(30):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, 200, (batch_size,))
        feats_train_batch = x_train[batch_index]
        Y_train_batch = y_train[batch_index]
        #cost(params, bias, features, labels):
        weights, _, _ = opt.step(cost1, weights, feats_train_batch, Y_train_batch)

        # Compute predictions on train and validation set
        predictions_train = [np.sign(variational_classifier(weights, f)) for f in x_train]
        predictions_val = [np.sign(variational_classifier(weights, f)) for f in x_val]

        # Compute accuracy on train and validation set
        acc_train = accuracy(y_train, predictions_train)

        acc_val = accuracy(y_val, predictions_val)

        his_acc_val.append(acc_val)
        his_acc_train.append(acc_train)
        his_cost.append(cost1(weights, features, Y))
        print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
            "".format(it + 1, cost(weights, features, Y), acc_train, acc_val))
        
    return his_acc_val, his_acc_train, np.array(his_cost)

his_acc_val1, his_acc_train1, his_cost1 = TrainQKSAN()

print(his_acc_val1, his_acc_train1, his_cost1)

The full traceback is:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[5], line 81
     76         print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
     77             "".format(it + 1, cost(weights, features, Y), acc_train, acc_val))
     79     return his_acc_val, his_acc_train, np.array(his_cost)
---> 81 his_acc_val1, his_acc_train1, his_cost1 = TrainQKSAN()
     83 print(his_acc_val1, his_acc_train1, his_cost1)

Cell In[5], line 62, in TrainQKSAN()
     60 Y_train_batch = y_train[batch_index]
     61 #cost(params, bias, features, labels):
---> 62 weights, _, _ = opt.step(cost1, weights, feats_train_batch, Y_train_batch)
     64 # Compute predictions on train and validation set
     65 predictions_train = [np.sign(variational_classifier(weights, f)) for f in x_train]

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/optimize/gradient_descent.py:93, in GradientDescentOptimizer.step(self, objective_fn, grad_fn, *args, **kwargs)
     75 def step(self, objective_fn, *args, grad_fn=None, **kwargs):
     76     """Update trainable arguments with one step of the optimizer.
     77 
     78     Args:
   (...)
     90         If single arg is provided, list [array] is replaced by array.
     91     """
---> 93     g, _ = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     94     new_args = self.apply_grad(g, args)
     96     # unwrap from list if one argument, cleaner return

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/optimize/nesterov_momentum.py:76, in NesterovMomentumOptimizer.compute_grad(self, objective_fn, args, kwargs, grad_fn)
     73         shifted_args[index] = args[index] - self.momentum * self.accumulation[index]
     75 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
---> 76 grad = g(*shifted_args, **kwargs)
     77 forward = getattr(g, "forward", None)
     79 grad = (grad,) if len(trainable_indices) == 1 else grad

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/_grad.py:165, in grad.__call__(self, *args, **kwargs)
    162     self._forward = self._fun(*args, **kwargs)
    163     return ()
--> 165 grad_value, ans = grad_fn(*args, **kwargs)  # pylint: disable=not-callable
    166 self._forward = ans
    168 return grad_value

File /opt/conda/envs/tf1/lib/python3.9/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/_grad.py:191, in grad._grad_with_forward(fun, x)
    185 if vspace(ans).size != 1:
    186     raise TypeError(
    187         "Grad only applies to real scalar-output functions. "
    188         "Try jacobian, elementwise_grad or holomorphic_grad."
    189     )
--> 191 grad_value = vjp(vspace(ans).ones())
    192 return grad_value, ans

File /opt/conda/envs/tf1/lib/python3.9/site-packages/autograd/core.py:14, in make_vjp.<locals>.vjp(g)
---> 14 def vjp(g): return backward_pass(g, end_node)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/autograd/core.py:21, in backward_pass(g, end_node)
     19 for node in toposort(end_node):
     20     outgrad = outgrads.pop(node)
---> 21     ingrads = node.vjp(outgrad[0])
     22     for parent, ingrad in zip(node.parents, ingrads):
     23         outgrads[parent] = add_outgrads(outgrads.get(parent), ingrad)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/autograd/core.py:67, in defvjp.<locals>.vjp_argnums.<locals>.<lambda>(g)
     64         raise NotImplementedError(
     65             "VJP of {} wrt argnum 0 not defined".format(fun.__name__))
     66     vjp = vjpfun(ans, *args, **kwargs)
---> 67     return lambda g: (vjp(g),)
     68 elif L == 2:
     69     argnum_0, argnum_1 = argnums

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/workflow/interfaces/autograd.py:199, in vjp.<locals>.grad_fn(dy)
    196 def grad_fn(dy):
    197     """Returns the vector-Jacobian product with given
    198     parameter values and output gradient dy"""
--> 199     vjps = jpc.compute_vjp(tapes, dy)
    200     return tuple(
    201         qml.math.to_numpy(v, max_depth=1) if isinstance(v, ArrayBox) else v for v in vjps
    202     )

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/workflow/jacobian_products.py:296, in TransformJacobianProducts.compute_vjp(self, tapes, dy)
    293     logger.debug("compute_vjp called with (%s, %s)", tapes, dy)
    295 if self._cache_full_jacobian:
--> 296     jacs = self.compute_jacobian(tapes)
    297     return _compute_vjps(jacs, dy, tapes)
    299 vjp_tapes, processing_fn = qml.gradients.batch_vjp(
    300     tapes, dy, self._gradient_transform, gradient_kwargs=self._gradient_kwargs
    301 )

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/workflow/jacobian_products.py:326, in TransformJacobianProducts.compute_jacobian(self, tapes)
    324 if tapes in self._cache:
    325     return self._cache[tapes]
--> 326 jac_tapes, batch_post_processing = self._gradient_transform(tapes, **self._gradient_kwargs)
    327 results = self._inner_execute(jac_tapes)
    328 jacs = tuple(batch_post_processing(results))

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/transforms/core/transform_dispatcher.py:130, in TransformDispatcher.__call__(self, *targs, **tkwargs)
    128     return self._qfunc_transform(obj, targs, tkwargs)
    129 if isinstance(obj, Sequence) and all(isinstance(q, qml.tape.QuantumScript) for q in obj):
--> 130     return self._batch_transform(obj, targs, tkwargs)
    132 # Input is not a QNode nor a quantum tape nor a device.
    133 # Assume Python decorator syntax:
    134 #
    135 # result = some_transform(*transform_args)(qnode)(*qnode_args)
    137 raise TransformError(
    138     "Decorating a QNode with @transform_fn(**transform_kwargs) has been "
    139     "removed. Please decorate with @functools.partial(transform_fn, **transform_kwargs) "
   (...)
    142     "https://docs.pennylane.ai/en/stable/development/deprecations.html#completed-deprecation-cycles",
    143 )

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/transforms/core/transform_dispatcher.py:337, in TransformDispatcher._batch_transform(self, original_batch, targs, tkwargs)
    331 tape_counts = []
    333 for t in original_batch:
    334     # Preprocess the tapes by applying batch transforms
    335     # to each tape, and storing corresponding tapes
    336     # for execution, processing functions, and list of tape lengths.
--> 337     new_tapes, fn = self(t, *targs, **tkwargs)
    338     execution_tapes.extend(new_tapes)
    339     batch_fns.append(fn)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/transforms/core/transform_dispatcher.py:101, in TransformDispatcher.__call__(self, *targs, **tkwargs)
     99 start = 0
    100 for tape in expanded_tapes:
--> 101     intermediate_tapes, post_processing_fn = self._transform(
    102         tape, *targs, **tkwargs
    103     )
    104     transformed_tapes.extend(intermediate_tapes)
    105     end = start + len(intermediate_tapes)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/parameter_shift.py:1117, in param_shift(tape, argnum, shifts, gradient_recipes, fallback_fn, f0, broadcast)
   1115     g_tapes, fn = var_param_shift(tape, argnum, shifts, gradient_recipes, f0, broadcast)
   1116 else:
-> 1117     g_tapes, fn = expval_param_shift(tape, argnum, shifts, gradient_recipes, f0, broadcast)
   1119 gradient_tapes.extend(g_tapes)
   1121 if unsupported_params:

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/parameter_shift.py:396, in expval_param_shift(tape, argnum, shifts, gradient_recipes, f0, broadcast)
    393     gradient_data.append((1, np.array([1.0]), h_fn, None, g_tapes[0].batch_size))
    394     continue
--> 396 recipe = _choose_recipe(argnum, idx, gradient_recipes, shifts, tape)
    397 recipe, at_least_one_unshifted, unshifted_coeff = _extract_unshifted(
    398     recipe, at_least_one_unshifted, f0, gradient_tapes, tape
    399 )
    400 coeffs, multipliers, op_shifts = recipe.T

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/parameter_shift.py:116, in _choose_recipe(argnum, idx, gradient_recipes, shifts, tape)
    114 else:
    115     op_shifts = None if shifts is None else shifts[arg_idx]
--> 116     recipe = _get_operation_recipe(tape, idx, shifts=op_shifts)
    117 return recipe

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/parameter_shift.py:286, in _get_operation_recipe(tape, t_idx, shifts, order)
    280     raise qml.operation.OperatorPropertyUndefined(
    281         f"The operation {op.name} does not have a grad_recipe, parameter_frequencies or "
    282         "a generator defined. No parameter shift rule can be applied."
    283     ) from e
    285 # Create shift rule from frequencies with given shifts
--> 286 coeffs, shifts = qml.gradients.generate_shift_rule(frequencies, shifts=shifts, order=order).T
    287 # The generated shift rules do not include a rescaling of the parameter, only shifts.
    288 mults = np.ones_like(coeffs)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/general_shift_rules.py:316, in generate_shift_rule(frequencies, shifts, order)
    244 r"""Computes the parameter shift rule for a unitary based on its generator's eigenvalue
    245 frequency spectrum.
    246 
   (...)
    313 :math:`\frac{\partial^2 f}{\partial \phi^2} = \frac{1}{2} \left[f(\phi) - f(\phi-\pi)\right]`.
    314 """
    315 frequencies = tuple(f for f in frequencies if f > 0)
--> 316 rule = _get_shift_rule(frequencies, shifts=shifts)
    318 if order > 1:
    319     T = frequencies_to_period(frequencies)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/gradients/general_shift_rules.py:142, in _get_shift_rule(frequencies, shifts)
    139 @functools.lru_cache(maxsize=None)
    140 def _get_shift_rule(frequencies, shifts=None):
    141     n_freqs = len(frequencies)
--> 142     frequencies = qml.math.sort(qml.math.stack(frequencies))
    143     freq_min = frequencies[0]
    145     if len(set(frequencies)) != n_freqs or freq_min <= 0:

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/math/multi_dispatch.py:151, in multi_dispatch.<locals>.decorator.<locals>.wrapper(*args, **kwargs)
    148 interface = interface or get_interface(*dispatch_args)
    149 kwargs["like"] = interface
--> 151 return fn(*args, **kwargs)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/pennylane/math/multi_dispatch.py:497, in stack(values, axis, like)
    468 """Stack a sequence of tensors along the specified axis.
    469 
    470 .. warning::
   (...)
    494        [5.00e+00, 8.00e+00, 1.01e+02]], dtype=float32)>
    495 """
    496 values = np.coerce(values, like=like)
--> 497 return np.stack(values, axis=axis, like=like)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/autoray/autoray.py:80, in do(fn, like, *args, **kwargs)
     31 """Do function named ``fn`` on ``(*args, **kwargs)``, peforming single
     32 dispatch to retrieve ``fn`` based on whichever library defines the class of
     33 the ``args[0]``, or the ``like`` keyword argument if specified.
   (...)
     77     <tf.Tensor: id=91, shape=(3, 3), dtype=float32>
     78 """
     79 backend = choose_backend(fn, *args, like=like, **kwargs)
---> 80 return get_lib_fn(backend, fn)(*args, **kwargs)

File <__array_function__ internals>:180, in stack(*args, **kwargs)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/numpy/core/shape_base.py:422, in stack(arrays, axis, out)
    420 arrays = [asanyarray(arr) for arr in arrays]
    421 if not arrays:
--> 422     raise ValueError('need at least one array to stack')
    424 shapes = {arr.shape for arr in arrays}
    425 if len(shapes) != 1:

ValueError: need at least one array to stack

Hi @RX1 ,

Unfortunately, the code you shared is not self-contained, so I can't run it to try to replicate your issue.

It looks to me like you're adapting the variational classifier demo. However, if you change the dataset, there are many other things you need to change as well. In this case, with your new data (or the way you're handling it), Y_train_batch = y_train[batch_index] no longer works. I don't know your dataset, but something like this might work instead: Y_train_batch = [y_train[i] for i in batch_index].
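
In case it's useful, here's a minimal runnable sketch of that indexing pattern. Everything in it (the shapes, the stand-in x_train and y_train, the seed) is placeholder data, not your real dataset:

import numpy as np

rng = np.random.default_rng(seed=0)
x_train = rng.normal(size=(200, 4))                        # stand-in feature matrix
y_train = [1 if row.sum() > 0 else -1 for row in x_train]  # stand-in label list (+/-1)

batch_size = 15
batch_index = rng.integers(0, len(x_train), size=batch_size)

feats_train_batch = x_train[batch_index]                     # fancy indexing works on an ndarray
Y_train_batch = np.array([y_train[i] for i in batch_index])  # works even when y_train is a plain list

print(feats_train_batch.shape, Y_train_batch.shape)          # (15, 4) (15,)

The key point is that fancy indexing like y_train[batch_index] only works on NumPy arrays; if your labels live in a plain Python list (or another container), you need to index them element by element as above.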

I hope this helps.