Update 2
Below is the updated cost function and training loop.
def cost(var, features, observed):
    preds = qnn(var, features)
    preds_np = np.array(preds).T
    mse = sklearn.metrics.mean_squared_error(observed, preds_np)
    return mse

opt = qml.AdamOptimizer(0.1, beta1=0.9, beta2=0.999)

num_train = len(x_train)
batch_size = 256
var = var_init

for it in range(100):
    batch_index = np.random.randint(0, num_train, (batch_size,))
    x_train_batch = x_train[batch_index]
    y_train_batch = y_train[batch_index]
    var, _cost = opt.step_and_cost(lambda v: cost(v, x_train_batch, y_train_batch), var)
    print("Iter: {:5d} | Cost: {:0.7f} ".format(it, _cost))
The rest of the code remains essentially the same.

I have tried everything I could think of for now, but the TypeError: float() argument must be a string or a real number, not 'ArrayBox' still persists. I have included the full error below as well.

What I can tell so far is that my predictions come back as an array of ArrayBox objects (array[array(value), array(value), ...]), which I don't understand, since I made sure to cast everything to numpy.float. The output below also shows that preds_np is an array of ArrayBoxes, so the error occurs in the mean squared error calculation. I also tried writing the MSE function manually (roughly along the lines of the sketch below), which gave the same error, and the other approaches I tried all failed the same way.
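For reference, the manual MSE I mentioned was roughly along these lines (an illustrative sketch rather than my exact code; manual_mse is just a placeholder name). Any attempt to force the ArrayBox predictions into a float array runs into the same float()/ArrayBox conversion error:

def manual_mse(observed, preds_np):
    # Illustrative sketch only: casting the ArrayBox entries to a float array
    # is the kind of conversion where the "float() ... not 'ArrayBox'" error appears.
    preds_float = np.array(preds_np, dtype=float)
    observed_float = np.array(observed, dtype=float)
    return np.mean((observed_float - preds_float) ** 2)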
Shapes: observed=(256, 5), preds_np=(256, 5)
Observed: [[ 0.07636211 0.01042428 1.68223176 0.3865876 2.59282841]
[ 0.85141523 -1.76938919 1.65395276 -1.24754269 -1.01027803]
[ 0.48676007 -0.00888649 -0.03757296 1.75628312 -0.31670208]
...
[-1.24609538 0.82554933 -0.01931672 -1.10620168 0.14732945]
[ 0.35853445 0.06638076 0.49601253 -0.73526325 -0.77044028]
[ 0.4579714 0.18093778 -0.10214571 -0.22933184 -1.03835437]]
Preds_np: [[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f557af40>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed54c0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f4fb6ec0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7b8cdc0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7badfc0>]
[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35fbec6d80>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed5440>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7c58b80>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7b8d740>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bad5c0>]
[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35fcd60700>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed4d40>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7c58c00>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bba600>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bafbc0>]
...
[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed5180>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35fcd8d840>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7b8c480>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bae200>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f8cbaa80>]
[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed5280>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35fcd8c7c0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7b8d100>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bae000>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f8cb9780>]
[<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7ed4cc0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f4fb5fc0>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7b8c040>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35e7bae900>
<autograd.numpy.numpy_boxes.ArrayBox object at 0x7c35f8cbb480>]]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
TypeError: float() argument must be a string or a real number, not 'ArrayBox'
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
<ipython-input-239-956141b9c0ef> in <cell line: 5>()
7 x_train_batch = x_train[batch_index]
8 y_train_batch = y_train[batch_index]
----> 9 var, _cost = opt.step_and_cost(lambda v: cost(v,x_train_batch,y_train_batch), var)
10 print("Iter: {:5d} | Cost: {:0.7f} ".format(it, _cost))
/usr/local/lib/python3.10/dist-packages/pennylane/optimize/gradient_descent.py in step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
57 """
58
---> 59 g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
60 new_args = self.apply_grad(g, args)
61
/usr/local/lib/python3.10/dist-packages/pennylane/optimize/gradient_descent.py in compute_grad(objective_fn, args, kwargs, grad_fn)
115 """
116 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 117 grad = g(*args, **kwargs)
118 forward = getattr(g, "forward", None)
119
/usr/local/lib/python3.10/dist-packages/pennylane/_grad.py in __call__(self, *args, **kwargs)
116 return ()
117
--> 118 grad_value, ans = grad_fn(*args, **kwargs) # pylint: disable=not-callable
119 self._forward = ans
120
/usr/local/lib/python3.10/dist-packages/autograd/wrap_util.py in nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
21 return nary_f
22 return nary_operator
/usr/local/lib/python3.10/dist-packages/pennylane/_grad.py in _grad_with_forward(fun, x)
134 difference being that it returns both the gradient *and* the forward pass
135 value."""
--> 136 vjp, ans = _make_vjp(fun, x)
137
138 if not vspace(ans).size == 1:
/usr/local/lib/python3.10/dist-packages/autograd/core.py in make_vjp(fun, x)
8 def make_vjp(fun, x):
9 start_node = VJPNode.new_root()
---> 10 end_value, end_node = trace(start_node, fun, x)
11 if end_node is None:
12 def vjp(g): return vspace(x).zeros()
/usr/local/lib/python3.10/dist-packages/autograd/tracer.py in trace(start_node, fun, x)
8 with trace_stack.new_trace() as t:
9 start_box = new_box(x, t, start_node)
---> 10 end_box = fun(start_box)
11 if isbox(end_box) and end_box._trace == start_box._trace:
12 return end_box._value, end_box._node
/usr/local/lib/python3.10/dist-packages/autograd/wrap_util.py in unary_f(x)
13 else:
14 subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)
16 if isinstance(argnum, int):
17 x = args[argnum]
<ipython-input-239-956141b9c0ef> in <lambda>(v)
7 x_train_batch = x_train[batch_index]
8 y_train_batch = y_train[batch_index]
----> 9 var, _cost = opt.step_and_cost(lambda v: cost(v,x_train_batch,y_train_batch), var)
10 print("Iter: {:5d} | Cost: {:0.7f} ".format(it, _cost))
<ipython-input-238-515f0ead4b63> in cost(var, features, observed)
7 print("Preds_np:", preds_np)
8
----> 9 mse = sklearn.metrics.mean_squared_error(observed, preds_np)
10 return mse
/usr/local/lib/python3.10/dist-packages/sklearn/metrics/_regression.py in mean_squared_error(y_true, y_pred, sample_weight, multioutput, squared)
440 0.825...
441 """
--> 442 y_type, y_true, y_pred, multioutput = _check_reg_targets(
443 y_true, y_pred, multioutput
444 )
/usr/local/lib/python3.10/dist-packages/sklearn/metrics/_regression.py in _check_reg_targets(y_true, y_pred, multioutput, dtype)
100 check_consistent_length(y_true, y_pred)
101 y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
--> 102 y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
103
104 if y_true.ndim == 1:
/usr/local/lib/python3.10/dist-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)
877 array = xp.astype(array, dtype, copy=False)
878 else:
--> 879 array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)
880 except ComplexWarning as complex_warning:
881 raise ValueError(
/usr/local/lib/python3.10/dist-packages/sklearn/utils/_array_api.py in _asarray_with_order(array, dtype, order, copy, xp)
183 if xp.__name__ in {"numpy", "numpy.array_api"}:
184 # Use NumPy API to support order
--> 185 array = numpy.asarray(array, order=order, dtype=dtype)
186 return xp.asarray(array, copy=copy)
187 else:
ValueError: setting an array element with a sequence.