Hey @Chase_Roberts,
Thank you for the reply. The QNodes essentially return the expectation value and variance of a quadrature observable for a single-mode squeezed state.
def qnode1(probe_state_params, encoded_phase):
    preparation(probe_state_params[0], probe_state_params[1], probe_state_params[2], probe_state_params[3])
    encoding(encoded_phase)
    detection(HD_angle)
    return qml.expval(qml.X(0))
The second QNode is defined analogously, but returns the variance of the same observable.
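For context, here is a minimal sketch of the surrounding setup; the internals of preparation, encoding and detection, the device handle dev, and the value of HD_angle are placeholders standing in for my actual code:

```python
import pennylane as qml

# placeholder device; the traceback shows I am running on default.gaussian
dev = qml.device("default.gaussian", wires=1)

HD_angle = 0.0  # homodyne detection angle (placeholder value)

def preparation(r, phi_r, a, phi_a):
    # prepare the single-mode squeezed (and displaced) probe state
    qml.Squeezing(r, phi_r, wires=0)
    qml.Displacement(a, phi_a, wires=0)

def encoding(phase):
    # encode the phase to be estimated as a rotation
    qml.Rotation(phase, wires=0)

def detection(angle):
    # homodyne detection angle, applied as a rotation before measuring X
    qml.Rotation(-angle, wires=0)

@qml.qnode(dev)
def qnode1(probe_state_params, encoded_phase):
    preparation(*probe_state_params)
    encoding(encoded_phase)
    detection(HD_angle)
    return qml.expval(qml.X(0))

@qml.qnode(dev)
def qnode2(probe_state_params, encoded_phase):
    preparation(*probe_state_params)
    encoding(encoded_phase)
    detection(HD_angle)
    return qml.var(qml.X(0))
```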
The full error traceback is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-26-0d143eebdd14> in <module>
1 grad_function = qml.grad(FI)
----> 2 grad = (grad_function(probe_state_params, encoded_phase))
~\Anaconda3\lib\site-packages\pennylane\_grad.py in __call__(self, *args, **kwargs)
94 """Evaluates the gradient function, and saves the function value
95 calculated during the forward pass in :attr:`.forward`."""
---> 96 grad_value, ans = self._get_grad_fn(args)(*args, **kwargs)
97 self._forward = ans
98 return grad_value
~\Anaconda3\lib\site-packages\autograd\wrap_util.py in nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
21 return nary_f
22 return nary_operator
~\Anaconda3\lib\site-packages\pennylane\_grad.py in _grad_with_forward(fun, x)
111 difference being that it returns both the gradient *and* the forward pass
112 value."""
--> 113 vjp, ans = _make_vjp(fun, x)
114
115 if not vspace(ans).size == 1:
~\Anaconda3\lib\site-packages\autograd\core.py in make_vjp(fun, x)
8 def make_vjp(fun, x):
9 start_node = VJPNode.new_root()
---> 10 end_value, end_node = trace(start_node, fun, x)
11 if end_node is None:
12 def vjp(g): return vspace(x).zeros()
~\Anaconda3\lib\site-packages\autograd\tracer.py in trace(start_node, fun, x)
8 with trace_stack.new_trace() as t:
9 start_box = new_box(x, t, start_node)
---> 10 end_box = fun(start_box)
11 if isbox(end_box) and end_box._trace == start_box._trace:
12 return end_box._value, end_box._node
~\Anaconda3\lib\site-packages\autograd\wrap_util.py in unary_f(x)
13 else:
14 subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)
16 if isinstance(argnum, int):
17 x = args[argnum]
<ipython-input-24-b1eb58ab96f4> in FI(probe_state_params, encoded_phase)
6 # derivative of mean
7 grad_function = qml.grad(qnode1)
----> 8 grad = (grad_function(probe_state_params, encoded_phase)[1])
9 mu_x2 = grad
10
~\Anaconda3\lib\site-packages\pennylane\_grad.py in __call__(self, *args, **kwargs)
94 """Evaluates the gradient function, and saves the function value
95 calculated during the forward pass in :attr:`.forward`."""
---> 96 grad_value, ans = self._get_grad_fn(args)(*args, **kwargs)
97 self._forward = ans
98 return grad_value
~\Anaconda3\lib\site-packages\autograd\wrap_util.py in nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
21 return nary_f
22 return nary_operator
~\Anaconda3\lib\site-packages\pennylane\_grad.py in _grad_with_forward(fun, x)
119 )
120
--> 121 grad_value = vjp(vspace(ans).ones())
122 return grad_value, ans
123
~\Anaconda3\lib\site-packages\autograd\core.py in vjp(g)
12 def vjp(g): return vspace(x).zeros()
13 else:
---> 14 def vjp(g): return backward_pass(g, end_node)
15 return vjp, end_value
16
~\Anaconda3\lib\site-packages\autograd\core.py in backward_pass(g, end_node)
19 for node in toposort(end_node):
20 outgrad = outgrads.pop(node)
---> 21 ingrads = node.vjp(outgrad[0])
22 for parent, ingrad in zip(node.parents, ingrads):
23 outgrads[parent] = add_outgrads(outgrads.get(parent), ingrad)
~\Anaconda3\lib\site-packages\autograd\core.py in <lambda>(g)
65 "VJP of {} wrt argnum 0 not defined".format(fun.__name__))
66 vjp = vjpfun(ans, *args, **kwargs)
---> 67 return lambda g: (vjp(g),)
68 elif L == 2:
69 argnum_0, argnum_1 = argnums
~\Anaconda3\lib\site-packages\pennylane\tape\interfaces\autograd.py in gradient_product(g)
200 # pass, so we do not need to re-unwrap the parameters.
201 self.set_parameters(self._all_params_unwrapped, trainable_only=False)
--> 202 jac = self.jacobian(device, params=params, **self.jacobian_options)
203 self.set_parameters(self._all_parameter_values, trainable_only=False)
204
~\Anaconda3\lib\site-packages\pennylane\tape\tapes\qubit_param_shift.py in jacobian(self, device, params, **options)
122 self._append_evA_tape = True
123 self._evA_result = None
--> 124 return super().jacobian(device, params, **options)
125
126 def parameter_shift(self, idx, params, **options):
~\Anaconda3\lib\site-packages\pennylane\tape\tapes\jacobian_tape.py in jacobian(self, device, params, **options)
563
564 # execute all tapes at once
--> 565 results = device.batch_execute(all_tapes)
566
567 # post-process the results with the appropriate function to fill jacobian columns with gradients
~\Anaconda3\lib\site-packages\pennylane\_device.py in batch_execute(self, circuits)
360 self.reset()
361
--> 362 res = self.execute(circuit.operations, circuit.observables)
363 results.append(res)
364
~\Anaconda3\lib\site-packages\pennylane\_device.py in execute(self, queue, observables, parameters, **kwargs)
288
289 for operation in queue:
--> 290 self.apply(operation.name, operation.wires, operation.parameters)
291
292 self.post_apply()
~\Anaconda3\lib\site-packages\pennylane\devices\default_gaussian.py in apply(self, operation, wires, par)
738
739 # get the symplectic matrix
--> 740 S = self._operation_map[operation](*par)
741
742 # expand the symplectic to act on the proper subsystem
~\Anaconda3\lib\site-packages\pennylane\devices\default_gaussian.py in squeezing(r, phi)
205 cp = math.cos(phi)
206 sp = math.sin(phi)
--> 207 ch = math.cosh(r)
208 sh = math.sinh(r)
209 return np.array([[ch - cp * sh, -sp * sh], [-sp * sh, ch + cp * sh]])
TypeError: must be real number, not ArrayBox
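For reference, the FI function that qml.grad is applied to looks roughly like this (reconstructed from the traceback, using the QNodes sketched above; the Fisher-information expression is abbreviated and the parameter values are just examples). Running it reproduces the error:

```python
from pennylane import numpy as np

def FI(probe_state_params, encoded_phase):
    # derivative of the mean quadrature w.r.t. the encoded phase
    grad_function = qml.grad(qnode1)
    mu_x2 = grad_function(probe_state_params, encoded_phase)[1]

    # variance of the quadrature from the second QNode
    var_x = qnode2(probe_state_params, encoded_phase)

    # classical Fisher information of the homodyne outcome
    # (exact expression abbreviated here)
    return mu_x2 ** 2 / var_x

# example values; differentiating FI is where the ArrayBox error appears
probe_state_params = np.array([0.5, 0.1, 0.3, 0.2], requires_grad=True)
encoded_phase = np.array(0.4, requires_grad=True)

grad_function = qml.grad(FI)
grad = grad_function(probe_state_params, encoded_phase)
```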
Thank you in advance for your time.
Regards,
Kannan