Hi Catalina, thanks for your reply. Here is the info:
- Output of qml.about():
Name: PennyLane
Version: 0.29.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: GitHub - PennyLaneAI/pennylane: PennyLane is a cross-platform Python library for quantum computing, quantum machine learning, and quantum chemistry. Train a quantum computer the same way as a neural network.
Author:
Author-email:
License: Apache License 2.0
Location: /opt/anaconda3/lib/python3.11/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning, PennyLane-SF
Platform info: macOS-10.16-x86_64-i386-64bit
Python version: 3.11.7
Numpy version: 1.23.5
Scipy version: 1.11.4
Installed devices:
- default.gaussian (PennyLane-0.29.1)
- default.mixed (PennyLane-0.29.1)
- default.qubit (PennyLane-0.29.1)
- default.qubit.autograd (PennyLane-0.29.1)
- default.qubit.jax (PennyLane-0.29.1)
- default.qubit.tf (PennyLane-0.29.1)
- default.qubit.torch (PennyLane-0.29.1)
- default.qutrit (PennyLane-0.29.1)
- null.qubit (PennyLane-0.29.1)
- lightning.qubit (PennyLane-Lightning-0.30.0)
- strawberryfields.fock (PennyLane-SF-0.29.1)
- strawberryfields.gaussian (PennyLane-SF-0.29.1)
- strawberryfields.gbs (PennyLane-SF-0.29.1)
- strawberryfields.remote (PennyLane-SF-0.29.1)
- strawberryfields.tf (PennyLane-SF-0.29.1)
- I am trying to solve the keeping_expectations_low challenge. I implemented optimize_circuit as shown below; variational_circuit returns qml.expval of the Hamiltonian, which I assumed would come back as a tensor object (see the quick type check after the code listing).
import json
import pennylane as qml
import pennylane.numpy as np

WIRES = 2
LAYERS = 5
NUM_PARAMETERS = LAYERS * WIRES * 3

initial_params = np.random.random(NUM_PARAMETERS)

def variational_circuit(params, hamiltonian):
    parameters = params.reshape((LAYERS, WIRES, 3))
    qml.templates.StronglyEntanglingLayers(parameters, wires=range(WIRES))
    return qml.expval(qml.Hermitian(hamiltonian, wires=[0, 1]))

def optimize_circuit(params, hamiltonian):
    max_iterations = 500
    tol_ener_diff = 1e-06
    step_size = 0.01
    for n in range(max_iterations):
        print(variational_circuit(params, hamiltonian))
        opt = qml.GradientDescentOptimizer(stepsize=step_size)
        params, prev_energy = opt.step_and_cost(variational_circuit, params, hamiltonian)
        energy = variational_circuit(params)
        # Calculate difference between new and old energies
        ener_diff = np.abs(energy - prev_energy)
        if n % 20 == 0:
            print(
                "Iteration = {:}, Energy = {:.8f} Ha, Convergence parameter = {"
                ":.8f} Ha".format(n, energy, conv)
            )
        if ener_diff <= tol_ener_diff:
            break
    return ener

dev = qml.device('default.qubit', wires=2)
circuit = qml.QNode(variational_circuit, dev, interface="autograd")

def run(test_case_input: str) -> str:
    ins = np.array(json.loads(test_case_input), requires_grad=False)
    hamiltonian = np.array(ins, float).reshape((2 ** WIRES), (2 ** WIRES))
    np.random.seed(1967)
    initial_params = np.random.random(NUM_PARAMETERS)
    out = str(optimize_circuit(initial_params, hamiltonian))
    return out

def check(solution_output: str, expected_output: str) -> None:
    solution_output = json.loads(solution_output)
    expected_output = json.loads(expected_output)
    assert np.isclose(solution_output, expected_output, rtol=5e-2)

# These are the public test cases
test_cases = [
    ('[0.863327072347624,0.0167108057202516,0.07991447085492759,0.0854049026262154,0.0167108057202516,0.8237963773906136,-0.07695947154193797,0.03131548733285282,0.07991447085492759,-0.07695947154193795,0.8355417021014687,-0.11345916130631205,0.08540490262621539,0.03131548733285283,-0.11345916130631205,0.758156886827099]', '0.61745341'),
    ('[0.32158897156285354,-0.20689268438270836,0.12366748295758379,-0.11737425017261123,-0.20689268438270836,0.7747346055276305,-0.05159966365446514,0.08215539696259792,0.12366748295758379,-0.05159966365446514,0.5769050487087416,0.3853362904758938,-0.11737425017261123,0.08215539696259792,0.3853362904758938,0.3986256655167206]', '0.00246488')
]

# This will run the public test cases locally
for i, (input_, expected_output) in enumerate(test_cases):
    print(f"Running test case {i} with input '{input_}'...")
    output = run(input_)
    try:
        output = run(input_)
    except Exception as exc:
        print(f"Runtime Error. {exc}")
    else:
        if message := check(output, expected_output):
            print(f"Wrong Answer. Have: '{output}'. Want: '{expected_output}'.")
        else:
            print("Correct!")
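Quick type check mentioned above: to see what variational_circuit actually hands back before any optimization, I run the lines below right after the definitions above. The identity matrix is just a placeholder Hermitian I made up for this check, not one of the test-case matrices.

# Placeholder check only: ham_check is a made-up identity Hermitian, not a challenge input.
ham_check = np.eye(2 ** WIRES, requires_grad=False)
ret = variational_circuit(initial_params, ham_check)
print(type(ret))
# I expected a pennylane.numpy tensor here, but since variational_circuit is a bare Python
# function (not the QNode stored in `circuit`), the printed type seems to be the
# pennylane.measurements.expval.ExpectationMP class named in the error below.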
When I try to run the public test cases listed above, I get the error below.
- Complete error stack trace:
KeyError                                  Traceback (most recent call last)
File /opt/anaconda3/lib/python3.11/site-packages/autograd/core.py:233, in vspace(value)
    232 try:
--> 233     return VSpace.mappings[type(value)](value)
    234 except KeyError:

KeyError: <class 'pennylane.measurements.expval.ExpectationMP'>

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
Cell In[61], line 26
     24 for i, (input_, expected_output) in enumerate(test_cases):
     25     print(f"Running test case {i} with input '{input_}'...")
---> 26     output = run(input_)
     27     try:
     28         output = run(input_)

Cell In[61], line 8, in run(test_case_input)
      6 np.random.seed(1967)
      7 initial_params = np.random.random(NUM_PARAMETERS)
----> 8 out = str(optimize_circuit(initial_params,hamiltonian))
     10 return out

Cell In[59], line 8, in optimize_circuit(params, hamiltonian)
      6 print(variational_circuit(params, hamiltonian))
      7 opt = qml.GradientDescentOptimizer(stepsize=step_size)
----> 8 params, prev_energy = opt.step_and_cost(variational_circuit, params, hamiltonian)
      9 energy = variational_circuit(params)
     10 # Calculate difference between new and old energies

File /opt/anaconda3/lib/python3.11/site-packages/pennylane/optimize/gradient_descent.py:59, in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
     39 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
     40     """Update trainable arguments with one step of the optimizer and return the corresponding
     41     objective function value prior to the step.
     42
   (...)
     56     If single arg is provided, list [array] is replaced by array.
     57     """
---> 59 g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     60 new_args = self.apply_grad(g, args)
     62 if forward is None:

File /opt/anaconda3/lib/python3.11/site-packages/pennylane/optimize/gradient_descent.py:117, in GradientDescentOptimizer.compute_grad(objective_fn, args, kwargs, grad_fn)
     99 r"""Compute gradient of the objective function at the given point and return it along with
    100 the objective function forward pass (if available).
    101
   (...)
    114 will not be evaluted and instead None will be returned.
    115 """
    116 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 117 grad = g(*args, **kwargs)
    118 forward = getattr(g, "forward", None)
    120 num_trainable_args = sum(getattr(arg, "requires_grad", False) for arg in args)

File /opt/anaconda3/lib/python3.11/site-packages/pennylane/_grad.py:115, in grad.__call__(self, *args, **kwargs)
    112     self._forward = self._fun(*args, **kwargs)
    113     return ()
--> 115 grad_value, ans = grad_fn(*args, **kwargs)
    116 self._forward = ans
    118 return grad_value

File /opt/anaconda3/lib/python3.11/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File /opt/anaconda3/lib/python3.11/site-packages/pennylane/_grad.py:135, in grad._grad_with_forward(fun, x)
    130 """This function is a replica of autograd.grad, with the only
    131 difference being that it returns both the gradient and the forward pass
    132 value."""
    133 vjp, ans = _make_vjp(fun, x)
--> 135 if not vspace(ans).size == 1:
    136     raise TypeError(
    137         "Grad only applies to real scalar-output functions. "
    138         "Try jacobian, elementwise_grad or holomorphic_grad."
    139     )
    141 grad_value = vjp(vspace(ans).ones())

File /opt/anaconda3/lib/python3.11/site-packages/autograd/core.py:238, in vspace(value)
    236     return vspace(getval(value))
    237 else:
--> 238     raise TypeError("Can't find vector space for value {} of type {}. "
    239                     "Valid types are {}".format(
    240                     value, type(value), VSpace.mappings.keys()))

TypeError: Can't find vector space for value expval(Hermitian(tensor([[ 0.86332707,  0.01671081,  0.07991447,  0.0854049 ],
        [ 0.01671081,  0.82379638, -0.07695947,  0.03131549],
        [ 0.07991447, -0.07695947,  0.8355417 , -0.11345916],
        [ 0.0854049 ,  0.03131549, -0.11345916,  0.75815689]], requires_grad=False), wires=[0, 1])) of type <class 'pennylane.measurements.expval.ExpectationMP'>. Valid types are dict_keys([<class 'autograd.core.SparseObject'>, <class 'list'>, <class 'tuple'>, <class 'dict'>, <class 'numpy.ndarray'>, <class 'float'>, <class 'numpy.float128'>, <class 'numpy.float64'>, <class 'numpy.float32'>, <class 'numpy.float16'>, <class 'complex'>, <class 'numpy.complex256'>, <class 'numpy.complex64'>, <class 'numpy.complex128'>, <class 'pennylane.numpy.tensor.tensor'>])
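In case it helps, here is the smallest snippet I could come up with that seems to reproduce the same TypeError on my setup. My guess is that the trigger is passing the bare variational_circuit function (rather than the circuit QNode bound to dev) into opt.step_and_cost, so the forward pass returns an ExpectationMP instead of a number; the identity Hamiltonian below is only a placeholder I made up for the reproduction.

import pennylane as qml
import pennylane.numpy as np

WIRES = 2
LAYERS = 5
NUM_PARAMETERS = LAYERS * WIRES * 3

def variational_circuit(params, hamiltonian):
    parameters = params.reshape((LAYERS, WIRES, 3))
    qml.templates.StronglyEntanglingLayers(parameters, wires=range(WIRES))
    return qml.expval(qml.Hermitian(hamiltonian, wires=[0, 1]))

np.random.seed(1967)
params = np.random.random(NUM_PARAMETERS)              # trainable by default
hamiltonian = np.eye(2 ** WIRES, requires_grad=False)  # placeholder Hermitian matrix

opt = qml.GradientDescentOptimizer(stepsize=0.01)
# No QNode is involved here, so the "cost function" handed to the optimizer returns an
# ExpectationMP measurement object; qml.grad / autograd then fails with the same
# "Can't find vector space ..." TypeError as in the stack trace above.
params, prev_energy = opt.step_and_cost(variational_circuit, params, hamiltonian)

If that is the actual cause, should I be passing circuit (the QNode built from variational_circuit and dev) to opt.step_and_cost instead of the bare function, or am I misreading the error?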