ValueError: State vectors have to be of norm 1.0, vector 0 has norm Autograd ArrayBox with value 1.002073287428885

I used the MottonenStatePreparation function in a neural network I built myself, and ran into the following problem:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_13608/2497313299.py in <module>
----> 1 his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test = TrainQKSAN(params_init)
      2 
      3 print(his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test)

/tmp/ipykernel_13608/3138901598.py in TrainQKSAN(params_init)
     65         Y_train_batch = Y_train[batch_index]
     66 
---> 67         weights, _, _ = opt.step(cost, weights, feats_train_batch, Y_train_batch)
     68 
     69         # Compute predictions on train and validation set

/opt/conda/lib/python3.9/site-packages/pennylane/optimize/gradient_descent.py in step(self, objective_fn, grad_fn, *args, **kwargs)
     86         """
     87 
---> 88         g, _ = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     89         new_args = self.apply_grad(g, args)
     90 

/opt/conda/lib/python3.9/site-packages/pennylane/optimize/nesterov_momentum.py in compute_grad(self, objective_fn, args, kwargs, grad_fn)
     69 
     70         g = get_gradient(objective_fn) if grad_fn is None else grad_fn
---> 71         grad = g(*shifted_args, **kwargs)
     72         forward = getattr(g, "forward", None)
     73 

/opt/conda/lib/python3.9/site-packages/pennylane/_grad.py in __call__(self, *args, **kwargs)
    118             return ()
    119 
--> 120         grad_value, ans = grad_fn(*args, **kwargs)  # pylint: disable=not-callable
    121         self._forward = ans
    122 

/opt/conda/lib/python3.9/site-packages/autograd/wrap_util.py in nary_f(*args, **kwargs)
     18             else:
     19                 x = tuple(args[i] for i in argnum)
---> 20             return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
     21         return nary_f
     22     return nary_operator

/opt/conda/lib/python3.9/site-packages/pennylane/_grad.py in _grad_with_forward(fun, x)
    136         difference being that it returns both the gradient *and* the forward pass
    137         value."""
--> 138         vjp, ans = _make_vjp(fun, x)
    139 
    140         if not vspace(ans).size == 1:

/opt/conda/lib/python3.9/site-packages/autograd/core.py in make_vjp(fun, x)
      8 def make_vjp(fun, x):
      9     start_node = VJPNode.new_root()
---> 10     end_value, end_node =  trace(start_node, fun, x)
     11     if end_node is None:
     12         def vjp(g): return vspace(x).zeros()

/opt/conda/lib/python3.9/site-packages/autograd/tracer.py in trace(start_node, fun, x)
      8     with trace_stack.new_trace() as t:
      9         start_box = new_box(x, t, start_node)
---> 10         end_box = fun(start_box)
     11         if isbox(end_box) and end_box._trace == start_box._trace:
     12             return end_box._value, end_box._node

/opt/conda/lib/python3.9/site-packages/autograd/wrap_util.py in unary_f(x)
     13                 else:
     14                     subargs = subvals(args, zip(argnum, x))
---> 15                 return fun(*subargs, **kwargs)
     16             if isinstance(argnum, int):
     17                 x = args[argnum]

/tmp/ipykernel_13608/3138901598.py in cost(params, features, labels)
     42 
     43 def cost(params, features, labels):
---> 44     predictions = [variational_classifier(params, feat) for feat in features]
     45     return square_loss(labels, predictions)
     46 

/tmp/ipykernel_13608/3138901598.py in <listcomp>(.0)
     42 
     43 def cost(params, features, labels):
---> 44     predictions = [variational_classifier(params, feat) for feat in features]
     45     return square_loss(labels, predictions)
     46 

/tmp/ipykernel_13608/3138901598.py in variational_classifier(params, feat)
     38 
     39 def variational_classifier(params, feat):
---> 40     return QKSAN(feat, params)
     41 
     42 

/opt/conda/lib/python3.9/site-packages/pennylane/qnode.py in __call__(self, *args, **kwargs)
    972 
    973         # construct the tape
--> 974         self.construct(args, kwargs)
    975 
    976         cache = self.execute_kwargs.get("cache", False)

/opt/conda/lib/python3.9/site-packages/pennylane/qnode.py in construct(self, args, kwargs)
    870             self.interface = qml.math.get_interface(*args, *list(kwargs.values()))
    871 
--> 872         self._tape = make_qscript(self.func, shots)(*args, **kwargs)
    873         self._qfunc_output = self.tape._qfunc_output
    874 

/opt/conda/lib/python3.9/site-packages/pennylane/tape/qscript.py in wrapper(*args, **kwargs)
   1529     def wrapper(*args, **kwargs):
   1530         with AnnotatedQueue() as q:
-> 1531             result = fn(*args, **kwargs)
   1532 
   1533         qscript = QuantumScript.from_queue(q, shots)

/tmp/ipykernel_13608/3138901598.py in QKSAN(feat, params)
     23     Ansatz(params[0: num_para], 0, wire)
     24     qml.adjoint(Ansatz)(params[num_para: 2 * num_para], 0, wire)
---> 25     qml.adjoint(DataEncoding)(x2, 0, wire)
     26 
     27     DataEncoding(x2, wire, (2 * wire))

/opt/conda/lib/python3.9/site-packages/pennylane/ops/op_math/adjoint.py in wrapper(*args, **kwargs)
    129     @wraps(fn)
    130     def wrapper(*args, **kwargs):
--> 131         qscript = make_qscript(fn)(*args, **kwargs)
    132         if lazy:
    133             adjoint_ops = [Adjoint(op) for op in reversed(qscript.operations)]

/opt/conda/lib/python3.9/site-packages/pennylane/tape/qscript.py in wrapper(*args, **kwargs)
   1529     def wrapper(*args, **kwargs):
   1530         with AnnotatedQueue() as q:
-> 1531             result = fn(*args, **kwargs)
   1532 
   1533         qscript = QuantumScript.from_queue(q, shots)

/tmp/ipykernel_13608/3138901598.py in DataEncoding(x, wires_start, wires_end)
      2 def DataEncoding(x, wires_start, wires_end):
      3     wires = np.arange(wires_start, wires_end).tolist()
----> 4     qml.MottonenStatePreparation(state_vector=x, wires=wires)
      5     #qml.AmplitudeEmbedding(features=x, wires=wires, pad_with=0., normalize=True)
      6 

/opt/conda/lib/python3.9/site-packages/pennylane/templates/state_preparations/mottonen.py in __init__(self, state_vector, wires, id)
    312                 norm = qml.math.sum(qml.math.abs(state) ** 2)
    313                 if not qml.math.allclose(norm, 1.0, atol=1e-3):
--> 314                     raise ValueError(
    315                         f"State vectors have to be of norm 1.0, vector {i} has norm {norm}"
    316                     )

ValueError: State vectors have to be of norm 1.0, vector 0 has norm Autograd ArrayBox with value 1.002073287428885

But strangely enough, the first iteration does run and produce a result.

@Maria_Schuld @CatalinaAlbornoz

feats_train = np.load("…/dataset/F_feats_train.npy")
Y_train = np.load("…/dataset/F_Y_train.npy")
feats_val = np.load("…/dataset/F_feats_val.npy")
Y_val = np.load("…/dataset/F_Y_val.npy")
features = np.load("…/dataset/F_features.npy")
Y = np.load("…/dataset/F_Y.npy")

This is the reason! The feature vectors loaded from these files were not normalized to unit norm.
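Normalizing each loaded array row-wise fixes it. A minimal sketch, assuming each row of feats_train is one state vector (the path is a placeholder for the elided one above):

from pennylane import numpy as np

feats_train = np.load("dataset/F_feats_train.npy")  # placeholder path

# Divide each row by its L2 norm so every feature vector has norm 1.0
# before it is handed to MottonenStatePreparation.
feats_train = feats_train / np.linalg.norm(feats_train, axis=1, keepdims=True)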

Hi @R-X, I’m glad you found the solution!

Since you’re encoding your data (x) with MottonenStatePreparation, you need to make sure its norm is one; otherwise you get this error.
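For example, you could renormalize inside your DataEncoding function (a sketch based on the code in your traceback, not a tested drop-in), or switch to the AmplitudeEmbedding call you already have commented out, since normalize=True handles the normalization for you:

import pennylane as qml
from pennylane import numpy as np

def DataEncoding(x, wires_start, wires_end):
    wires = np.arange(wires_start, wires_end).tolist()
    # Renormalize so the prepared state has unit norm, even when the
    # input drifts slightly away from 1.0 (like the 1.002... in your error).
    x = x / np.sqrt(np.sum(np.abs(x) ** 2))
    qml.MottonenStatePreparation(state_vector=x, wires=wires)
    # Alternative: let AmplitudeEmbedding normalize (and pad) for you:
    # qml.AmplitudeEmbedding(features=x, wires=wires, pad_with=0., normalize=True)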