Using `from pennylane import numpy as np` leads to "TypeError: iteration over a 0-d array", and no further training is possible. Using plain `import numpy as np` avoids this error, but then PennyLane warns me to set the `requires_grad` attribute on some arguments, so I am stuck either way. A similar problem was mentioned in the forum thread "Gradients of quantum generator with tf.interface is none - #10 by Pavan", but no solution was given.
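For context, this is how I understand trainability marking is supposed to work with the autograd interface (a sketch based on the PennyLane docs, not my actual code):

from pennylane import numpy as np

# pennylane.numpy arrays carry a requires_grad flag: trainable parameters
# default to True, while input data should be marked False so autograd
# does not try to differentiate with respect to it.
weights = np.random.randn(4, 3, requires_grad=True)        # trainable
features = np.array([0.1, 0.2, 0.3], requires_grad=False)  # fixed data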
Below is my example code, which uses 4 qumodes for binary classification of MNIST:
import pennylane as qml
from pennylane import numpy as np  # this import is the one that triggers the error

num_modes = 4

def accuracy(labels, predictions):
    # Fraction of predictions matching their labels.
    hits = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            hits = hits + 1
    return hits / len(labels)

def square_loss(labels, predictions):
    # Mean squared error between labels and predictions.
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    return loss / len(labels)
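As a quick sanity check, both metrics behave as expected on hypothetical values:

labels = [1, -1, 1]
preds = [1.0, -1.0, -1.0]
print(accuracy(labels, preds))     # 0.6666...
print(square_loss(labels, preds))  # 1.3333...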
dev = qml.device('strawberryfields.fock', wires=num_modes, cutoff_dim=cutoff_dim)

@qml.qnode(dev)
def PGKSAN(inputs, params):
    # ... (circuit body omitted) ...
    return qml.expval(qml.X(num_modes - 1))

def cost(params, features, labels):
    predictions = [PGKSAN(inputs, params) for inputs in features]
    return square_loss(labels, predictions)
def TrainQKSAN():
    opt = qml.NesterovMomentumOptimizer(0.2)
    batch_size = 15
    weights = params_init  # initial parameters, defined earlier
    # bias = np.array(0.0, requires_grad=True)
    his_acc_val = []
    his_acc_train = []
    his_cost = []
    for it in range(30):
        # Update the weights by one optimizer step over a random batch
        batch_index = np.random.randint(0, 2 * train_size, (batch_size,))
        feats_train_batch = feats_train[batch_index]
        print(feats_train_batch.shape)
        Y_train_batch = Y_train[batch_index]
        (weights, _, _), _ = opt.step_and_cost(cost, weights, feats_train_batch, Y_train_batch)
        # Compute predictions on the train and validation sets
        predictions_train = [np.sign(PGKSAN(inputs, weights)) for inputs in feats_train]
        predictions_val = [np.sign(PGKSAN(inputs, weights)) for inputs in feats_val]
        # Compute accuracy on the train and validation sets
        acc_train = accuracy(Y_train, predictions_train)
        acc_val = accuracy(Y_val, predictions_val)
        his_acc_val.append(acc_val)
        his_acc_train.append(acc_train)
        # `features` and `Y` are the full dataset, defined elsewhere in my notebook
        his_cost.append(cost(weights, features, Y))
        print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f}"
              "".format(it + 1, his_cost[-1], acc_train, acc_val))
    return his_acc_val, his_acc_train, np.array(his_cost)
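For completeness, this is roughly how I initialize the arrays the loop refers to (a sketch: `params_shape`, `feats_train_raw`, and `Y_train_raw` are placeholders for my real shapes and data):

from pennylane import numpy as np

# Trainable circuit parameters (requires_grad=True is the default for
# pennylane.numpy arrays; written out explicitly here).
params_init = np.random.randn(*params_shape, requires_grad=True)

# Features and labels are data, not parameters, so they are marked
# non-trainable to avoid the requires_grad warnings.
feats_train = np.array(feats_train_raw, requires_grad=False)
Y_train = np.array(Y_train_raw, requires_grad=False)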
The error is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[4], line 1
----> 1 his_acc_val1, his_acc_train1, his_cost1 = TrainQKSAN()
3 print(his_acc_val1, his_acc_train1, his_cost1)
Cell In[3], line 18, in TrainQKSAN()
16 Y_train_batch = Y_train[batch_index]
17 #cost(params, bias, features, labels):
---> 18 (weights, _, _), _ = opt.step_and_cost(cost, weights, feats_train_batch, Y_train_batch)
19 #(var, _, _), _cost = opt.step_and_cost(cost, var, X, Y)
20 # Compute predictions on train and validation set
21 predictions_train = [np.sign(PGKSAN(inputs, weights)) for inputs in feats_train]
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/optimize/gradient_descent.py:59, in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
39 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
40 """Update trainable arguments with one step of the optimizer and return the corresponding
41 objective function value prior to the step.
42
(...)
56 If single arg is provided, list [array] is replaced by array.
57 """
---> 59 g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
60 new_args = self.apply_grad(g, args)
62 if forward is None:
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/optimize/nesterov_momentum.py:71, in NesterovMomentumOptimizer.compute_grad(self, objective_fn, args, kwargs, grad_fn)
68 shifted_args[index] = args[index] - self.momentum * self.accumulation[index]
70 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
---> 71 grad = g(*shifted_args, **kwargs)
72 forward = getattr(g, "forward", None)
74 grad = (grad,) if len(trainable_indices) == 1 else grad
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_grad.py:115, in grad.__call__(self, *args, **kwargs)
112 self._forward = self._fun(*args, **kwargs)
113 return ()
--> 115 grad_value, ans = grad_fn(*args, **kwargs)
116 self._forward = ans
118 return grad_value
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_grad.py:133, in grad._grad_with_forward(fun, x)
127 @staticmethod
128 @unary_to_nary
129 def _grad_with_forward(fun, x):
130 """This function is a replica of ``autograd.grad``, with the only
131 difference being that it returns both the gradient *and* the forward pass
132 value."""
--> 133 vjp, ans = _make_vjp(fun, x)
135 if not vspace(ans).size == 1:
136 raise TypeError(
137 "Grad only applies to real scalar-output functions. "
138 "Try jacobian, elementwise_grad or holomorphic_grad."
139 )
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/core.py:10, in make_vjp(fun, x)
8 def make_vjp(fun, x):
9 start_node = VJPNode.new_root()
---> 10 end_value, end_node = trace(start_node, fun, x)
11 if end_node is None:
12 def vjp(g): return vspace(x).zeros()
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:10, in trace(start_node, fun, x)
8 with trace_stack.new_trace() as t:
9 start_box = new_box(x, t, start_node)
---> 10 end_box = fun(start_box)
11 if isbox(end_box) and end_box._trace == start_box._trace:
12 return end_box._value, end_box._node
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
13 else:
14 subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)
Cell In[2], line 38, in cost(params, features, labels)
37 def cost(params, features, labels):
---> 38 predictions = [PGKSAN(inputs, params) for inputs in features]
39 return square_loss(labels, predictions)
Cell In[2], line 38, in <listcomp>(.0)
37 def cost(params, features, labels):
---> 38 predictions = [PGKSAN(inputs, params) for inputs in features]
39 return square_loss(labels, predictions)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/qnode.py:889, in QNode.__call__(self, *args, **kwargs)
885 self._update_original_device()
887 return res
--> 889 res = qml.execute(
890 [self.tape],
891 device=self.device,
892 gradient_fn=self.gradient_fn,
893 interface=self.interface,
894 gradient_kwargs=self.gradient_kwargs,
895 override_shots=override_shots,
896 **self.execute_kwargs,
897 )
899 if old_interface == "auto":
900 self.interface = "auto"
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:729, in execute(tapes, device, gradient_fn, interface, mode, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform)
723 except ImportError as e:
724 raise qml.QuantumFunctionError(
725 f"{mapped_interface} not found. Please install the latest "
726 f"version of {mapped_interface} to enable the '{mapped_interface}' interface."
727 ) from e
--> 729 res = _execute(
730 tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode
731 )
733 return batch_fn(res)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/autograd.py:81, in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff, mode)
75 # pylint misidentifies autograd.builtins as a dict
76 # pylint: disable=no-member
77 parameters = autograd.builtins.tuple(
78 [autograd.builtins.list(t.get_parameters()) for t in tapes]
79 )
---> 81 return _execute(
82 parameters,
83 tapes=tapes,
84 device=device,
85 execute_fn=execute_fn,
86 gradient_fn=gradient_fn,
87 gradient_kwargs=gradient_kwargs,
88 _n=_n,
89 max_diff=max_diff,
90 )[0]
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:44, in primitive.<locals>.f_wrapped(*args, **kwargs)
42 parents = tuple(box._node for _ , box in boxed_args)
43 argnums = tuple(argnum for argnum, _ in boxed_args)
---> 44 ans = f_wrapped(*argvals, **kwargs)
45 node = node_constructor(ans, f_wrapped, argvals, kwargs, argnums, parents)
46 return new_box(ans, trace, node)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
46 return new_box(ans, trace, node)
47 else:
---> 48 return f_raw(*args, **kwargs)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/autograd.py:125, in _execute(parameters, tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff)
104 """Autodifferentiable wrapper around ``Device.batch_execute``.
105
106 The signature of this function is designed to work around Autograd restrictions.
(...)
122 understand the consequences!
123 """
124 with qml.tape.Unwrap(*tapes):
--> 125 res, jacs = execute_fn(tapes, **gradient_kwargs)
127 for i, r in enumerate(res):
128 if any(isinstance(m, CountsMP) for m in tapes[i].measurements):
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:205, in cache_execute.<locals>.wrapper(tapes, **kwargs)
201 return (res, []) if return_tuple else res
203 else:
204 # execute all unique tapes that do not exist in the cache
--> 205 res = fn(execution_tapes.values(), **kwargs)
207 final_res = []
209 for i, tape in enumerate(tapes):
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:131, in cache_execute.<locals>.fn(tapes, **kwargs)
129 def fn(tapes: Sequence[QuantumTape], **kwargs): # pylint: disable=function-redefined
130 tapes = [expand_fn(tape) for tape in tapes]
--> 131 return original_fn(tapes, **kwargs)
File /opt/conda/envs/penny-sf/lib/python3.9/contextlib.py:79, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
76 @wraps(func)
77 def inner(*args, **kwds):
78 with self._recreate_cm():
---> 79 return func(*args, **kwds)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_device.py:530, in Device.batch_execute(self, circuits)
525 for circuit in circuits:
526 # we need to reset the device here, else it will
527 # not start the next computation in the zero state
528 self.reset()
--> 530 res = self.execute(circuit.operations, circuit.observables)
531 results.append(res)
533 if self.tracker.active:
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_device.py:459, in Device.execute(self, queue, observables, parameters, **kwargs)
455 self.apply(operation.name, operation.wires, operation.parameters)
457 self.post_apply()
--> 459 self.pre_measure()
461 for obs in observables:
462 if isinstance(obs, Tensor):
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane_sf/fock.py:131, in StrawberryFieldsFock.pre_measure(self)
129 def pre_measure(self):
130 self.eng = sf.Engine("fock", backend_options={"cutoff_dim": self.cutoff})
--> 131 results = self.eng.run(self.prog)
133 self.state = results.state
134 self.samples = results.samples
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/engine.py:570, in LocalEngine.run(self, program, args, compile_options, **kwargs)
565 if c.op.measurement_deps and eng_run_options["shots"] > 1:
566 raise NotImplementedError(
567 "Feed-forwarding of measurements cannot be used together with multiple shots."
568 )
--> 570 return super()._run(
571 program_lst, args=args, compile_options=compile_options, **eng_run_options
572 )
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/engine.py:276, in BaseEngine._run(self, program, args, compile_options, **kwargs)
274 # compile the program for the correct backend if a compiler or a device exists
275 if "compiler" in compile_options or "device" in compile_options:
--> 276 p = p.compile(**compile_options)
278 received_rolled = False # whether a TDMProgram had a rolled circuit
279 if isinstance(p, TDMProgram):
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/program.py:726, in Program.compile(self, device, compiler, **kwargs)
723 compiler = _get_compiler(compiler)
724 target = compiler.short_name
--> 726 seq = compiler.decompose(self.circuit)
728 if kwargs.get("warn_connected", True):
729 DAG = pu.list_to_DAG(seq)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/compilers/compiler.py:279, in Compiler.decompose(self, seq)
277 try:
278 kwargs = self.decompositions[op_name]
--> 279 temp = cmd.op.decompose(cmd.reg, **kwargs)
280 # now compile the decomposition
281 temp = self.decompose(temp)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/ops.py:477, in Gate.decompose(self, reg, **kwargs)
472 def decompose(self, reg, **kwargs):
473 """Decompose the operation into elementary operations supported by the backend API.
474
475 Like :func:`Operation.decompose`, but applies self.dagger.
476 """
--> 477 seq = self._decompose(reg, **kwargs)
478 if self.dagger:
479 # apply daggers, reverse the Command sequence
480 for cmd in seq:
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/ops.py:2142, in CXgate._decompose(self, reg, **kwargs)
2140 def _decompose(self, reg, **kwargs):
2141 s = self.p[0]
-> 2142 r = pf.asinh(-s / 2)
2143 theta = 0.5 * pf.atan2(-1.0 / pf.cosh(r), -pf.tanh(r))
2144 return [
2145 Command(BSgate(theta, 0), reg),
2146 Command(Sgate(r, 0), reg[0]),
2147 Command(Sgate(-r, 0), reg[1]),
2148 Command(BSgate(theta + np.pi / 2, 0), reg),
2149 ]
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/parameters.py:129, in wrap_mathfunc.<locals>.wrapper(*args)
125 raise ValueError(
126 "Parameter functions with array arguments: all the arguments must be arrays of the same shape."
127 )
128 # apply func elementwise, recursively, on the args
--> 129 return np.array([wrapper(*k) for k in zip(*args)])
130 return func(*args)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/parameters.py:129, in <listcomp>(.0)
125 raise ValueError(
126 "Parameter functions with array arguments: all the arguments must be arrays of the same shape."
127 )
128 # apply func elementwise, recursively, on the args
--> 129 return np.array([wrapper(*k) for k in zip(*args)])
130 return func(*args)
File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/parameters.py:129, in wrap_mathfunc.<locals>.wrapper(*args)
125 raise ValueError(
126 "Parameter functions with array arguments: all the arguments must be arrays of the same shape."
127 )
128 # apply func elementwise, recursively, on the args
--> 129 return np.array([wrapper(*k) for k in zip(*args)])
130 return func(*args)
TypeError: iteration over a 0-d array
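If I read the traceback correctly, the failure happens while Strawberry Fields decomposes a CXgate (the target of PennyLane's `qml.ControlledAddition`) and applies `pf.asinh` to its parameter. Here is a minimal sketch of what I believe exercises the same code path; the gate choice is my assumption from the traceback, not my full circuit:

import pennylane as qml
from pennylane import numpy as np

dev2 = qml.device('strawberryfields.fock', wires=2, cutoff_dim=4)

@qml.qnode(dev2)
def tiny(s):
    qml.Displacement(0.5, 0.0, wires=0)
    qml.ControlledAddition(s, wires=[0, 1])  # compiled to SF's CXgate
    return qml.expval(qml.X(1))

s = np.array(0.1, requires_grad=True)
print(qml.grad(tiny)(s))  # I expect this to hit the same CXgate._decompose call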
My PennyLane version information is:
Name: PennyLane
Version: 0.29.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /opt/conda/envs/penny-sf/lib/python3.9/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning, PennyLane-SF
Platform info: Linux-4.18.0-348.2.1.el8_5.x86_64-x86_64-with-glibc2.31
Python version: 3.9.19
Numpy version: 1.23.5
Scipy version: 1.13.1
Installed devices:
- default.gaussian (PennyLane-0.29.1)
- default.mixed (PennyLane-0.29.1)
- default.qubit (PennyLane-0.29.1)
- default.qubit.autograd (PennyLane-0.29.1)
- default.qubit.jax (PennyLane-0.29.1)
- default.qubit.tf (PennyLane-0.29.1)
- default.qubit.torch (PennyLane-0.29.1)
- default.qutrit (PennyLane-0.29.1)
- null.qubit (PennyLane-0.29.1)
- lightning.qubit (PennyLane-Lightning-0.30.0)
- strawberryfields.fock (PennyLane-SF-0.29.1)
- strawberryfields.gaussian (PennyLane-SF-0.29.1)
- strawberryfields.gbs (PennyLane-SF-0.29.1)
- strawberryfields.remote (PennyLane-SF-0.29.1)
- strawberryfields.tf (PennyLane-SF-0.29.1)