Hi @CatalinaAlbornoz ,
I only ever use PennyLane's NumPy (from pennylane import numpy as np), as you showed. When I change the device from lightning.qubit to default.qubit and the diff_method from adjoint to best, it surprisingly does produce output, although the two gradients differ:
qml.grad(normal_cost, argnum=1)(x, y)
>> -4.315619113490985
qml.grad(dask_cost, argnum=1)(x, y)
>> -0.8970631311618367
However, I did follow your suggestion and used Python 3.10, and I have not changed the NumPy version, since PennyLane installs its own. The error occurs when I use lightning.qubit with the adjoint diff method.
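For context, here is a stripped-down sketch of what I am running. The circuit body, the normal_cost function, and the example values of x and y below are only placeholders for illustration (my real circuit is larger); the body of dask_cost is the one that appears in the traceback further down:

```python
import dask
from dask.distributed import Client

import pennylane as qml
from pennylane import numpy as np

# Placeholder circuit for illustration only -- my real circuit is more involved.
dev = qml.device("lightning.qubit", wires=1)   # error case; "default.qubit" runs

@qml.qnode(dev, diff_method="adjoint")         # error case; "best" runs
def circuit(xi, y):
    qml.RX(xi, wires=0)
    qml.RY(y, wires=0)
    return qml.expval(qml.PauliZ(0))

def normal_cost(x, y):
    # Serial version: sum of expectation values over the data points.
    temp = []
    for i in range(len(x)):
        temp.append(circuit(x[i], y))
    return np.sum(np.array(temp))

def dask_cost(x, y):
    # Same computation, but each circuit evaluation is a delayed Dask task.
    # This body matches Cell In[43] in the traceback below.
    temp = []
    for i in range(len(x)):
        val = dask.delayed(circuit)(x[i], y)
        temp.append(val)
    expvals = dask.compute(*temp)
    expvals = np.sum(np.array(expvals))
    return expvals

client = Client()  # distributed scheduler, as in the traceback

# Illustrative inputs only; my actual x and y are different.
x = np.array([0.1, 0.2, 0.3], requires_grad=False)
y = np.array(0.5, requires_grad=True)

qml.grad(normal_cost, argnum=1)(x, y)
qml.grad(dask_cost, argnum=1)(x, y)
```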
Here is the output of qml.about():
Name: PennyLane
Version: 0.29.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: c:\users\setty\anaconda3\envs\temp\lib\site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning
Platform info: Windows-10-10.0.19042-SP0
Python version: 3.10.9
Numpy version: 1.23.5
Scipy version: 1.10.1
Installed devices:
- default.gaussian (PennyLane-0.29.1)
- default.mixed (PennyLane-0.29.1)
- default.qubit (PennyLane-0.29.1)
- default.qubit.autograd (PennyLane-0.29.1)
- default.qubit.jax (PennyLane-0.29.1)
- default.qubit.tf (PennyLane-0.29.1)
- default.qubit.torch (PennyLane-0.29.1)
- default.qutrit (PennyLane-0.29.1)
- null.qubit (PennyLane-0.29.1)
- lightning.qubit (PennyLane-Lightning-0.29.0)
And here is the error traceback:
qml.grad(dask_cost, argnum=1)(x, y)
2023-03-21 11:32:27,376 - distributed.protocol.core - CRITICAL - Failed to deserialize
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\core.py", line 158, in loads
return msgpack.loads(
File "msgpack\_unpacker.pyx", line 194, in msgpack._cmsgpack.unpackb
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\core.py", line 138, in _decode_default
return merge_and_deserialize(
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 497, in merge_and_deserialize
return deserialize(header, merged_frames, deserializers=deserializers)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 426, in deserialize
return loads(header, frames)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 180, in serialization_error_loads
raise TypeError(msg)
TypeError: Could not serialize object of type ArrayBox
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 63, in dumps
result = pickle.dumps(x, **dump_kwargs)
AttributeError: Can't pickle local object 'VJPNode.initialize_root.<locals>.<lambda>'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 68, in dumps
pickler.dump(x)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 24, in reducer_override
if _always_use_pickle_for(obj):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 45, in _always_use_pickle_for
return isinstance(x, (str, bytes))
RecursionError: maximum recursion depth exceeded in __instancecheck__
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 347, in serialize
header, frames = dumps(x, context=context) if wants_context else dumps(x)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 71, in pickle_dumps
frames[0] = pickle.dumps(
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 81, in dumps
result = cloudpickle.dumps(x, **dump_kwargs)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\cloudpickle\cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\cloudpickle\cloudpickle_fast.py", line 632, in dump
return Pickler.dump(self, obj)
TypeError: cannot pickle 'generator' object
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[47], line 1
----> 1 qml.grad(dask_cost, argnum=1)(x, y)
File ~\Anaconda3\envs\temp\lib\site-packages\pennylane\_grad.py:115, in grad.__call__(self, *args, **kwargs)
112 self._forward = self._fun(*args, **kwargs)
113 return ()
--> 115 grad_value, ans = grad_fn(*args, **kwargs)
116 self._forward = ans
118 return grad_value
File ~\Anaconda3\envs\temp\lib\site-packages\autograd\wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
18 else:
19 x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
File ~\Anaconda3\envs\temp\lib\site-packages\pennylane\_grad.py:133, in grad._grad_with_forward(fun, x)
127 @staticmethod
128 @unary_to_nary
129 def _grad_with_forward(fun, x):
130 """This function is a replica of ``autograd.grad``, with the only
131 difference being that it returns both the gradient *and* the forward pass
132 value."""
--> 133 vjp, ans = _make_vjp(fun, x)
135 if not vspace(ans).size == 1:
136 raise TypeError(
137 "Grad only applies to real scalar-output functions. "
138 "Try jacobian, elementwise_grad or holomorphic_grad."
139 )
File ~\Anaconda3\envs\temp\lib\site-packages\autograd\core.py:10, in make_vjp(fun, x)
8 def make_vjp(fun, x):
9 start_node = VJPNode.new_root()
---> 10 end_value, end_node = trace(start_node, fun, x)
11 if end_node is None:
12 def vjp(g): return vspace(x).zeros()
File ~\Anaconda3\envs\temp\lib\site-packages\autograd\tracer.py:10, in trace(start_node, fun, x)
8 with trace_stack.new_trace() as t:
9 start_box = new_box(x, t, start_node)
---> 10 end_box = fun(start_box)
11 if isbox(end_box) and end_box._trace == start_box._trace:
12 return end_box._value, end_box._node
File ~\Anaconda3\envs\temp\lib\site-packages\autograd\wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
13 else:
14 subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)
Cell In[43], line 6, in dask_cost(x, y)
4 val = dask.delayed(circuit)(x[i], y)
5 temp.append(val)
----> 6 expvals = dask.compute(*temp)
7 expvals = np.sum(np.array(expvals))
8 return expvals
File ~\Anaconda3\envs\temp\lib\site-packages\dask\base.py:599, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
596 keys.append(x.__dask_keys__())
597 postcomputes.append(x.__dask_postcompute__())
--> 599 results = schedule(dsk, keys, **kwargs)
600 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\client.py:3168, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
3166 should_rejoin = False
3167 try:
-> 3168 results = self.gather(packed, asynchronous=asynchronous, direct=direct)
3169 finally:
3170 for f in futures.values():
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\client.py:2328, in Client.gather(self, futures, errors, direct, asynchronous)
2326 else:
2327 local_worker = None
-> 2328 return self.sync(
2329 self._gather,
2330 futures,
2331 errors=errors,
2332 direct=direct,
2333 local_worker=local_worker,
2334 asynchronous=asynchronous,
2335 )
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\utils.py:345, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
343 return future
344 else:
--> 345 return sync(
346 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
347 )
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\utils.py:412, in sync(loop, func, callback_timeout, *args, **kwargs)
410 if error:
411 typ, exc, tb = error
--> 412 raise exc.with_traceback(tb)
413 else:
414 return result
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\utils.py:385, in sync.<locals>.f()
383 future = wait_for(future, callback_timeout)
384 future = asyncio.ensure_future(future)
--> 385 result = yield future
386 except Exception:
387 error = sys.exc_info()
File ~\Anaconda3\envs\temp\lib\site-packages\tornado\gen.py:769, in Runner.run(self)
766 exc_info = None
768 try:
--> 769 value = future.result()
770 except Exception:
771 exc_info = sys.exc_info()
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\client.py:2220, in Client._gather(self, futures, errors, direct, local_worker)
2218 else:
2219 self._gather_future = future
-> 2220 response = await future
2222 if response["status"] == "error":
2223 log = logger.warning if errors == "raise" else logger.debug
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\client.py:2271, in Client._gather_remote(self, direct, local_worker)
2268 response["data"].update(data2)
2270 else: # ask scheduler to gather data for us
-> 2271 response = await retry_operation(self.scheduler.gather, keys=keys)
2273 return response
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\utils_comm.py:434, in retry_operation(coro, operation, *args, **kwargs)
428 retry_delay_min = parse_timedelta(
429 dask.config.get("distributed.comm.retry.delay.min"), default="s"
430 )
431 retry_delay_max = parse_timedelta(
432 dask.config.get("distributed.comm.retry.delay.max"), default="s"
433 )
--> 434 return await retry(
435 partial(coro, *args, **kwargs),
436 count=retry_count,
437 delay_min=retry_delay_min,
438 delay_max=retry_delay_max,
439 operation=operation,
440 )
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\utils_comm.py:413, in retry(coro, count, delay_min, delay_max, jitter_fraction, retry_on_exceptions, operation)
411 delay *= 1 + random.random() * jitter_fraction
412 await asyncio.sleep(delay)
--> 413 return await coro()
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\core.py:1234, in PooledRPCCall.__getattr__.<locals>.send_recv_from_rpc(**kwargs)
1232 prev_name, comm.name = comm.name, "ConnectionPool." + key
1233 try:
-> 1234 return await send_recv(comm=comm, op=key, **kwargs)
1235 finally:
1236 self.pool.reuse(self.addr, comm)
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\core.py:993, in send_recv(comm, reply, serializers, deserializers, **kwargs)
991 await comm.write(msg, serializers=serializers, on_error="raise")
992 if reply:
--> 993 response = await comm.read(deserializers=deserializers)
994 else:
995 response = None
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\comm\tcp.py:254, in TCP.read(self, deserializers)
251 try:
252 frames = unpack_frames(frames)
--> 254 msg = await from_frames(
255 frames,
256 deserialize=self.deserialize,
257 deserializers=deserializers,
258 allow_offload=self.allow_offload,
259 )
260 except EOFError:
261 # Frames possibly garbled or truncated by communication error
262 self.abort()
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\comm\utils.py:100, in from_frames(frames, deserialize, deserializers, allow_offload)
98 res = await offload(_from_frames)
99 else:
--> 100 res = _from_frames()
102 return res
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\comm\utils.py:83, in from_frames.<locals>._from_frames()
81 def _from_frames():
82 try:
---> 83 return protocol.loads(
84 frames, deserialize=deserialize, deserializers=deserializers
85 )
86 except EOFError:
87 if size > 1000:
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\core.py:158, in loads(frames, deserialize, deserializers)
152 raise ValueError(
153 "Unpickle on the Scheduler isn't allowed, set `distributed.scheduler.pickle=true`"
154 )
156 return msgpack_decode_default(obj)
--> 158 return msgpack.loads(
159 frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts
160 )
162 except Exception:
163 logger.critical("Failed to deserialize", exc_info=True)
File msgpack\_unpacker.pyx:194, in msgpack._cmsgpack.unpackb()
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\core.py:138, in loads.<locals>._decode_default(obj)
136 if "compression" in sub_header:
137 sub_frames = decompress(sub_header, sub_frames)
--> 138 return merge_and_deserialize(
139 sub_header, sub_frames, deserializers=deserializers
140 )
141 else:
142 return Serialized(sub_header, sub_frames)
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py:497, in merge_and_deserialize(header, frames, deserializers)
493 merged = bytearray().join(subframes)
495 merged_frames.append(merged)
--> 497 return deserialize(header, merged_frames, deserializers=deserializers)
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py:426, in deserialize(header, frames, deserializers)
421 raise TypeError(
422 "Data serialized with %s but only able to deserialize "
423 "data with %s" % (name, str(list(deserializers)))
424 )
425 dumps, loads, wants_context = families[name]
--> 426 return loads(header, frames)
File ~\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py:180, in serialization_error_loads(header, frames)
178 def serialization_error_loads(header, frames):
179 msg = "\n".join([codecs.decode(frame, "utf8") for frame in frames])
--> 180 raise TypeError(msg)
TypeError: Could not serialize object of type ArrayBox
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 63, in dumps
result = pickle.dumps(x, **dump_kwargs)
AttributeError: Can't pickle local object 'VJPNode.initialize_root.<locals>.<lambda>'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 68, in dumps
pickler.dump(x)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 24, in reducer_override
if _always_use_pickle_for(obj):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 45, in _always_use_pickle_for
return isinstance(x, (str, bytes))
RecursionError: maximum recursion depth exceeded in __instancecheck__
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 347, in serialize
header, frames = dumps(x, context=context) if wants_context else dumps(x)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\serialize.py", line 71, in pickle_dumps
frames[0] = pickle.dumps(
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\distributed\protocol\pickle.py", line 81, in dumps
result = cloudpickle.dumps(x, **dump_kwargs)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\cloudpickle\cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "C:\Users\setty\Anaconda3\envs\temp\lib\site-packages\cloudpickle\cloudpickle_fast.py", line 632, in dump
return Pickler.dump(self, obj)
TypeError: cannot pickle 'generator' object
I think you are right. If it is working for you, then it should work for me too; I must just be missing something here.
Thanks for the help!