Hello, I am trying to optimise quantum circuits. Below is my minimal working example.
import pennylane as qml
from pennylane import numpy as np
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(initial_state, params):
    # Prepare the (normalized) input state on wire 0, then apply parametrized rotations.
    qml.StatePrep(initial_state, wires=0, normalize=True)
    qml.Rot(params[0], params[1], params[2], wires=0)
    qml.Rot(params[3], params[4], params[5], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.probs(wires=[0, 1])

def cost_function(params, initial_state_vec, pprep_vec):
    # Weighted average of (1 - probability of outcome i) over the input states.
    cost = 0
    for i in range(len(initial_state_vec)):
        results = circuit(initial_state_vec[i], params)
        cost += pprep_vec[i]*(1 - results[i])
    return cost
opt = qml.GradientDescentOptimizer()
n_iter = 100
initial_state_vec = [[1, 0], [0, 1], [1, 1], [1, -1]]
n_state = len(initial_state_vec)
pprep_vec = np.ones(n_state)/n_state
rng = np.random.default_rng()
params = rng.random(6)*np.pi
cost = np.empty(n_iter)
for i in range(n_iter):
    params, cost[i] = opt.step_and_cost(lambda params: cost_function(params, initial_state_vec, pprep_vec), params)
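(For reference, the circuit for a single input state can be printed with qml.draw, which might make the structure clearer; this line is purely illustrative and not part of the optimisation.)

print(qml.draw(circuit)(initial_state_vec[0], params))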
As I scale up the number of qubits and parameters, I would like to run this optimisation in parallel to save time. I know PennyLane supports parameter broadcasting, but I cannot get it working with my example (a minimal standalone sketch of the broadcasting behaviour I have in mind is included after the modified code below). If I try to use parameter broadcasting by changing the params and the cost function as below,
params = rng.random((6, 2))*np.pi

def cost_function(params, initial_state_vec, pprep_vec):
    cost = [0]*2
    for j in range(len(cost)):
        for i in range(len(initial_state_vec)):
            results = circuit(initial_state_vec[i], params)
            cost[j] += pprep_vec[i]*(1 - results[j][i])
    return cost
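For reference, this is the kind of parameter broadcasting I was expecting, based on my reading of the PennyLane docs (a minimal standalone sketch with a throwaway one-qubit circuit, not my actual example):

import pennylane as qml
from pennylane import numpy as np

dev_demo = qml.device("default.qubit", wires=1)

@qml.qnode(dev_demo)
def demo_circuit(theta):
    # Passing an array of angles should broadcast: the circuit is evaluated
    # once per angle and the results come back batched.
    qml.RX(theta, wires=0)
    return qml.expval(qml.PauliZ(0))

thetas = np.array([0.1, 0.2, 0.3])
print(demo_circuit(thetas))  # expected: three expectation values, one per angle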
When I run the modified version above, I get the following error.
C:\Users\Jackson\venv-lightning-no-spyder-notebook\Lib\site-packages\autograd\tracer.py:14: UserWarning: Output seems independent of input.
warnings.warn("Output seems independent of input.")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\spyder_kernels\customize\utils.py:209, in exec_encapsulate_locals(code_ast, globals, locals, exec_fun, filename)
207 if filename is None:
208 filename = "<stdin>"
--> 209 exec_fun(compile(code_ast, filename, "exec"), globals, None)
210 finally:
211 if use_locals_hack:
212 # Cleanup code
File c:\users\jackson\documents\phd applications 2023\singapore\astar\python codes\untitled0.py:40
38 cost = np.empty(n_iter)
39 for i in range(n_iter):
---> 40 params, cost[i] = opt.step_and_cost(lambda params: cost_function(params, initial_state_vec, pprep_vec), params)
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\pennylane\optimize\gradient_descent.py:64, in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
44 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
45 """Update trainable arguments with one step of the optimizer and return the corresponding
46 objective function value prior to the step.
47
(...)
61 If single arg is provided, list [array] is replaced by array.
62 """
---> 64 g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
65 new_args = self.apply_grad(g, args)
67 if forward is None:
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\pennylane\optimize\gradient_descent.py:122, in GradientDescentOptimizer.compute_grad(objective_fn, args, kwargs, grad_fn)
104 r"""Compute the gradient of the objective function at the given point and return it along with
105 the objective function forward pass (if available).
106
(...)
119 will not be evaluated and instead ``None`` will be returned.
120 """
121 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 122 grad = g(*args, **kwargs)
123 forward = getattr(g, "forward", None)
125 num_trainable_args = sum(getattr(arg, "requires_grad", False) for arg in args)
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\pennylane\_grad.py:224, in grad.__call__(self, *args, **kwargs)
221 self._forward = self._fun(*args, **kwargs)
222 return ()
--> 224 grad_value, ans = grad_fn(*args, **kwargs) # pylint: disable=not-callable
225 self._forward = ans
227 return grad_value
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\autograd\wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
File ~\venv-lightning-no-spyder-notebook\Lib\site-packages\pennylane\_grad.py:245, in grad._grad_with_forward(fun, x)
242 vjp, ans = _make_vjp(fun, x) # pylint: disable=redefined-outer-name
244 if vspace(ans).size != 1:
--> 245 raise TypeError(
246 "Grad only applies to real scalar-output functions. "
247 "Try jacobian, elementwise_grad or holomorphic_grad."
248 )
250 grad_value = vjp(vspace(ans).ones())
251 return grad_value, ans
TypeError: Grad only applies to real scalar-output functions. Try jacobian, elementwise_grad or holomorphic_grad.
Here is the output of qml.about().
Name: PennyLane
Version: 0.39.0
Summary: PennyLane is a cross-platform Python library for quantum computing, quantum machine learning, and quantum chemistry. Train a quantum computer the same way as a neural network.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: C:\Users\Jackson\venv-lightning-no-spyder-notebook\Lib\site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, packaging, pennylane-lightning, requests, rustworkx, scipy, toml, typing-extensions
Required-by: PennyLane_Lightning
Platform info: Windows-10-10.0.19045-SP0
Python version: 3.12.7
Numpy version: 2.0.2
Scipy version: 1.14.1
Installed devices:
- default.clifford (PennyLane-0.39.0)
- default.gaussian (PennyLane-0.39.0)
- default.mixed (PennyLane-0.39.0)
- default.qubit (PennyLane-0.39.0)
- default.qutrit (PennyLane-0.39.0)
- default.qutrit.mixed (PennyLane-0.39.0)
- default.tensor (PennyLane-0.39.0)
- null.qubit (PennyLane-0.39.0)
- reference.qubit (PennyLane-0.39.0)
- lightning.qubit (PennyLane_Lightning-0.39.0)
Any help with this is appreciated.