I am using a Jetson Xavier NX with Ubuntu 20.04. The nvcc version is
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2022 NVIDIA Corporation
Built on Sun_Oct_23_22:16:07_PDT_2022
Cuda compilation tools, release 11.4, V11.4.315
Build cuda_11.4.r11.4/compiler.31964100_0
Here is my code:
wires=4
dev4 = qml.device("lightning.gpu", wires=wires )
@qml.qnode(dev4)
def CONVCircuit(phi, wires, i=0):
"""
quantum convolution Node
"""
# parameter
theta = np.pi / 2
qml.Rot(phi[0]*2*np.pi/255,phi[1]*2*np.pi/255,phi[2]*2*np.pi/255, wires=0)
qml.Rot(phi[3]*2*np.pi/255,phi[4]*2*np.pi/255,phi[5]*2*np.pi/255, wires=1)
qml.Rot(phi[6]*2*np.pi/255,phi[7]*2*np.pi/255,phi[8]*2*np.pi/255, wires=2)
qml.Rot(phi[9]*2*np.pi/255,phi[10]*2*np.pi/255,phi[11]*2*np.pi/255, wires=3)
qml.RX(np.pi, wires=0)
qml.RX(np.pi, wires=1)
qml.RX(np.pi, wires=2)
qml.RX(np.pi, wires=3)
qml.CRZ(theta, wires=[1, 0])
qml.CRZ(theta, wires=[3, 2])
qml.CRX(theta, wires=[1, 0])
qml.CRX(theta, wires=[3, 2])
qml.CRZ(theta, wires=[2, 0])
qml.CRX(theta, wires=[2, 0])
# Expectation value
measurement = qml.expval(qml.PauliZ(wires=0))
return measurement
Here is the error message:
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/qnode.py:847, in QNode.__call__(self, *args, **kwargs)
843 self._update_original_device()
845 return res
--> 847 res = qml.execute(
848 [self.tape],
849 device=self.device,
850 gradient_fn=self.gradient_fn,
851 interface=self.interface,
852 gradient_kwargs=self.gradient_kwargs,
853 override_shots=override_shots,
854 **self.execute_kwargs,
855 )
857 if old_interface == "auto":
858 self.interface = "auto"
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/interfaces/execution.py:724, in execute(tapes, device, gradient_fn, interface, mode, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform)
718 except ImportError as e:
719 raise qml.QuantumFunctionError(
720 f"{mapped_interface} not found. Please install the latest "
721 f"version of {mapped_interface} to enable the '{mapped_interface}' interface."
722 ) from e
--> 724 res = _execute(
725 tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode
726 )
728 return batch_fn(res)
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/interfaces/autograd.py:81, in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff, mode)
75 # pylint misidentifies autograd.builtins as a dict
76 # pylint: disable=no-member
77 parameters = autograd.builtins.tuple(
78 [autograd.builtins.list(t.get_parameters()) for t in tapes]
79 )
---> 81 return _execute(
82 parameters,
83 tapes=tapes,
84 device=device,
85 execute_fn=execute_fn,
86 gradient_fn=gradient_fn,
87 gradient_kwargs=gradient_kwargs,
88 _n=_n,
89 max_diff=max_diff,
90 )[0]
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/autograd/tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
46 return new_box(ans, trace, node)
47 else:
---> 48 return f_raw(*args, **kwargs)
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/interfaces/autograd.py:125, in _execute(parameters, tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff)
104 """Autodifferentiable wrapper around ``Device.batch_execute``.
105
106 The signature of this function is designed to work around Autograd restrictions.
(...)
122 understand the consequences!
123 """
124 with qml.tape.Unwrap(*tapes):
--> 125 res, jacs = execute_fn(tapes, **gradient_kwargs)
127 for i, r in enumerate(res):
128 if any(isinstance(m, CountsMP) for m in tapes[i].measurements):
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/interfaces/execution.py:206, in cache_execute.<locals>.wrapper(tapes, **kwargs)
202 return (res, []) if return_tuple else res
204 else:
205 # execute all unique tapes that do not exist in the cache
--> 206 res = fn(execution_tapes.values(), **kwargs)
208 final_res = []
210 for i, tape in enumerate(tapes):
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/interfaces/execution.py:131, in cache_execute.<locals>.fn(tapes, **kwargs)
129 def fn(tapes: Sequence[QuantumTape], **kwargs): # pylint: disable=function-redefined
130 tapes = [expand_fn(tape) for tape in tapes]
--> 131 return original_fn(tapes, **kwargs)
File ~/anaconda3/envs/myenv/lib/python3.10/contextlib.py:79, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
76 @wraps(func)
77 def inner(*args, **kwds):
78 with self._recreate_cm():
---> 79 return func(*args, **kwds)
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/_qubit_device.py:656, in QubitDevice.batch_execute(self, circuits)
653 self.reset()
655 # TODO: Insert control on value here
--> 656 res = self.execute(circuit)
657 results.append(res)
659 if self.tracker.active:
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/_qubit_device.py:445, in QubitDevice.execute(self, circuit, **kwargs)
443 results = self._collect_shotvector_results(circuit, counts_exist)
444 else:
--> 445 results = self.statistics(circuit=circuit)
447 if not circuit.is_sampled:
449 if len(measurements) == 1:
File ~/pennylane-lightning-gpu-0.28.0_rc0/pennylane_lightning_gpu/lightning_gpu.py:407, in LightningGPU.statistics(self, circuit, shot_range, bin_size)
405 def statistics(self, circuit, shot_range=None, bin_size=None):
406 ## Ensure D2H sync before calculating non-GPU supported operations
--> 407 return super().statistics(circuit, shot_range, bin_size)
File ~/anaconda3/envs/myenv/lib/python3.10/site-packages/pennylane/_qubit_device.py:807, in QubitDevice.statistics(self, observables, shot_range, bin_size, circuit)
803 # TODO: Remove return_type when `observables` argument is removed from this method
804 # Pass instances directly
805 elif obs.return_type is Expectation:
806 # Appends a result of shape (num_bins,) if bin_size is not None, else a scalar
--> 807 results.append(self.expval(obs, shot_range=shot_range, bin_size=bin_size))
809 elif obs.return_type is Variance:
810 # Appends a result of shape (num_bins,) if bin_size is not None, else a scalar
811 results.append(self.var(obs, shot_range=shot_range, bin_size=bin_size))
File ~/pennylane-lightning-gpu-0.28.0_rc0/pennylane_lightning_gpu/lightning_gpu.py:741, in LightningGPU.expval(self, observable, shot_range, bin_size)
729 return self._gpu_state.ExpectationValue(
730 device_wires, qml.matrix(observable).ravel(order="C")
731 )
733 par = (
734 observable.parameters
735 if (
(...)
739 else []
740 )
--> 741 return self._gpu_state.ExpectationValue(
742 observable.name,
743 self.wires.indices(observable.wires),
744 par, # observables should not pass parameters, use matrix instead
745 qml.matrix(observable).ravel(order="C"),
746 )
PLException: [/home/auvmu/pennylane-lightning-gpu-0.28.0_rc0/pennylane_lightning_gpu/src/simulator/StateVectorCudaManaged.hpp][Line:1683][Method:getExpectationValueDeviceMatrix]: Error in PennyLane Lightning: Internal custatevec error
The output of `qml.about()` is:
Name: PennyLane
Version: 0.28.0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /home/auvmu/anaconda3/envs/myenv/lib/python3.10/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning, PennyLane-Lightning-GPU
Platform info: Linux-5.10.104-tegra-aarch64-with-glibc2.31
Python version: 3.10.12
Numpy version: 1.23.5
Scipy version: 1.11.1
Installed devices:
- default.gaussian (PennyLane-0.28.0)
- default.mixed (PennyLane-0.28.0)
- default.qubit (PennyLane-0.28.0)
- default.qubit.autograd (PennyLane-0.28.0)
- default.qubit.jax (PennyLane-0.28.0)
- default.qubit.tf (PennyLane-0.28.0)
- default.qubit.torch (PennyLane-0.28.0)
- default.qutrit (PennyLane-0.28.0)
- null.qubit (PennyLane-0.28.0)
- lightning.qubit (PennyLane-Lightning-0.28.0)
- lightning.gpu (PennyLane-Lightning-GPU-0.28.0)