Hello! I'm trying to optimize a circuit in which a Hamiltonian (defined via qml.Hamiltonian) is evolved via qml.evolve, followed by single-qubit rotations. Both the Hamiltonian coefficients and the rotation angles depend on trainable parameters. I defined a cost function and used qml.GradientDescentOptimizer, but when I ran opt.step_and_cost I got the following error:
TypeError: Can't differentiate w.r.t. type <class 'pennylane.ops.qubit.hamiltonian.Hamiltonian'>

Full traceback:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
File ~/..., in new_box(value, trace, node)
117 try:
--> 118 return box_type_mappings[type(value)](value, trace, node)
119 except KeyError:
KeyError: <class 'pennylane.ops.qubit.hamiltonian.Hamiltonian'>
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
/Users/... .ipynb Cell 24 line 1
opt = qml.GradientDescentOptimizer(eta)
qng_cost = []
---> new_params = opt.step_and_cost(cost, init_params, a, phi)
# for _ in range(steps):
# print(f'Step: {_}')
# new_params = opt.step(circuit, init_params, phi)
# qng_cost.append(circuit(new_params))
File ~/..., in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
39 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
40 \"\"\"Update trainable arguments with one step of the optimizer and return the corresponding
41 objective function value prior to the step.
42
(...)
56 If single arg is provided, list [array] is replaced by array.
57 \"\"\"
---> 59 g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
60 new_args = self.apply_grad(g, args)
62 if forward is None:
File ~/..., in GradientDescentOptimizer.compute_grad(objective_fn, args, kwargs, grad_fn)
99 r\"\"\"Compute gradient of the objective function at the given point and return it along with
100 the objective function forward pass (if available).
101
(...)
114 will not be evaluted and instead ``None`` will be returned.
115 \"\"\"
116 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 117 grad = g(*args, **kwargs)
118 forward = getattr(g, \"forward\", None)
120 num_trainable_args = sum(getattr(arg, \"requires_grad\", False) for arg in args)
File ~/..., in grad.__call__(self, *args, **kwargs)
115 self._forward = self._fun(*args, **kwargs)
116 return ()
--> 118 grad_value, ans = grad_fn(*args, **kwargs) # pylint: disable=not-callable
119 self._forward = ans
121 return grad_value
File ~/..., in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
File ~/..., in grad._grad_with_forward(fun, x)
130 @staticmethod
131 @unary_to_nary
132 def _grad_with_forward(fun, x):
133 \"\"\"This function is a replica of ``autograd.grad``, with the only
134 difference being that it returns both the gradient *and* the forward pass
135 value.\"\"\"
--> 136 vjp, ans = _make_vjp(fun, x)
138 if not vspace(ans).size == 1:
139 raise TypeError(
140 \"Grad only applies to real scalar-output functions. \"
141 \"Try jacobian, elementwise_grad or holomorphic_grad.\"
142 )
File ~/..., in make_vjp(fun, x)
8 def make_vjp(fun, x):
9 start_node = VJPNode.new_root()
---> 10 end_value, end_node = trace(start_node, fun, x)
11 if end_node is None:
12 def vjp(g): return vspace(x).zeros()
File ~/..., in trace(start_node, fun, x)
8 with trace_stack.new_trace() as t:
9 start_box = new_box(x, t, start_node)
---> 10 end_box = fun(start_box)
11 if isbox(end_box) and end_box._trace == start_box._trace:
12 return end_box._value, end_box._node
File ~/..., in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
13 else:
14 subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)
/Users/... .ipynb Cell 24 line 2
def cost(params, a, phi):
----> circuit_output = circuit(params, a, phi)
mse = np.mean((phi-circuit_output)**2)
return mse
File ~/..., in QNode.__call__(self, *args, **kwargs)
967 kwargs[\"shots\"] = _get_device_shots(self._original_device)
969 # construct the tape
--> 970 self.construct(args, kwargs)
972 cache = self.execute_kwargs.get(\"cache\", False)
973 using_custom_cache = (
974 hasattr(cache, \"__getitem__\")
975 and hasattr(cache, \"__setitem__\")
976 and hasattr(cache, \"__delitem__\")
977 )
File ~/..., in QNode.construct(self, args, kwargs)
853 self.interface = qml.math.get_interface(*args, *list(kwargs.values()))
855 with qml.queuing.AnnotatedQueue() as q:
--> 856 self._qfunc_output = self.func(*args, **kwargs)
858 self._tape = QuantumScript.from_queue(q, shots)
860 params = self.tape.get_parameters(trainable_only=False)
/Users/... .ipynb Cell 24 line 3
qml.adjoint(U1)(params)
---> c = get_Sy(nqubits) * a[0]
return qml.expval(c)
File ~/..., in ArrayBox.__rmul__(self, other)
---> 36 def __rmul__(self, other): return anp.multiply(other, self)
File ~/..., in primitive.<locals>.f_wrapped(*args, **kwargs)
44 ans = f_wrapped(*argvals, **kwargs)
45 node = node_constructor(ans, f_wrapped, argvals, kwargs, argnums, parents)
---> 46 return new_box(ans, trace, node)
47 else:
48 return f_raw(*args, **kwargs)
File ~/..., in new_box(value, trace, node)
118 return box_type_mappings[type(value)](value, trace, node)
119 except KeyError:
--> 120 raise TypeError(\"Can't differentiate w.r.t. type {}\".format(type(value)))
TypeError: Can't differentiate w.r.t. type <class 'pennylane.ops.qubit.hamiltonian.Hamiltonian'>"
To put it more concretely, here’s my code! (Let me know if you need anything else)
import pennylane as qml
from pennylane import numpy as np

nqubits = 10
L = 1  # number of U1 layers

def get_observables(N):
    observables = []
    # Coupling operators
    for i in range(N - 1):
        observables.append(qml.PauliZ(i) @ qml.PauliZ(i + 1))
    # Identity operators
    for i in range(N):
        observables.append(qml.Identity(i))
    return observables
def get_coeffs(params, N):
    coeffs = []
    # Coupling coeffs
    for i in range(N - 1):
        coeffs.append(params[0] ** 2 / params[1])
    # Constant coeffs
    for i in range(N):
        coeffs.append(params[1])
    return coeffs
def create_Hamiltonian(params):
    coeffs = get_coeffs(params, nqubits)
    obs = get_observables(nqubits)
    # H = qml.dot(coeffs, obs)
    H = qml.Hamiltonian(coeffs, obs)
    return H
def get_Sy(nqubits):
    S_0 = nqubits / 2
    c = 0
    for i in range(nqubits):
        c += (1 / (2 * S_0)) * qml.PauliY(wires=i)
    return c
def U1(params):
    start_index = 0
    num_trotter_steps = 10
    for i in range(L):
        new_params = params[start_index:start_index + 3]
        H = create_Hamiltonian(new_params[0:2])
        qml.evolve(H, num_steps=num_trotter_steps)
        for j in range(nqubits):
            qml.RX(new_params[2], wires=j)  # theta value changes for each of the L layers
        start_index += 3  # put start_index back in for the next layer
dev = qml.device("default.qubit", wires=nqubits, shots=None)

@qml.qnode(dev)
def circuit(params, a, phi):
    for i in range(nqubits):  # Making the initial CSS
        qml.Hadamard(wires=i)
    U1(params)
    for z in range(nqubits):  # Perturbation
        qml.RY(phi, wires=z)
    qml.adjoint(U1)(params)
    # expectation_values = [qml.expval(qml.PauliY(wires=i)) for i in range(nqubits)]
    c = get_Sy(nqubits) * a[0]
    return qml.expval(c)
init_params = create_params(L)
phi = np.array([0.001], requires_grad=False)
a = np.array([0.001], requires_grad=True)

eta = 0.1
opt = qml.GradientDescentOptimizer(eta)
new_params = opt.step_and_cost(cost, init_params, a, phi)
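For completeness, the cost function I pass to step_and_cost is the same one that appears in the traceback, and create_params just builds the initial trainable parameter array (three parameters per layer: the two Hamiltonian coefficients and the RX angle). Below is a minimal stand-in for create_params, not my actual initialization, just enough to hit the same error:

def cost(params, a, phi):
    circuit_output = circuit(params, a, phi)
    mse = np.mean((phi - circuit_output) ** 2)
    return mse

# Stand-in for create_params: any flat trainable array with
# 3 entries per layer (two Hamiltonian coefficients + one RX angle)
def create_params(L):
    return np.array([0.1] * (3 * L), requires_grad=True)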
Here is all of the information about my packages (output of qml.about()):
Name: PennyLane
Version: 0.33.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /.../
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane-Lightning
Platform info: macOS-13.4.1-x86_64-i386-64bit
Python version: 3.11.5
Numpy version: 1.26.2
Scipy version: 1.11.4
Installed devices:
- default.gaussian (PennyLane-0.33.1)
- default.mixed (PennyLane-0.33.1)
- default.qubit (PennyLane-0.33.1)
- default.qubit.autograd (PennyLane-0.33.1)
- default.qubit.jax (PennyLane-0.33.1)
- default.qubit.legacy (PennyLane-0.33.1)
- default.qubit.tf (PennyLane-0.33.1)
- default.qubit.torch (PennyLane-0.33.1)
- default.qutrit (PennyLane-0.33.1)
- null.qubit (PennyLane-0.33.1)
- lightning.qubit (PennyLane-Lightning-0.33.1)
Any help with fixing this error would be greatly appreciated! I have also tried using qml.dot to define my Hamiltonian, but it still gives me the same error.
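Concretely, the qml.dot attempt just uses the line that is commented out in create_Hamiltonian above:

def create_Hamiltonian(params):
    coeffs = get_coeffs(params, nqubits)
    obs = get_observables(nqubits)
    H = qml.dot(coeffs, obs)  # instead of qml.Hamiltonian(coeffs, obs)
    return H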