Hello, I am experimenting with parameter broadcasting for quantum circuits where the circuit is created as a Torch layer, and I found some strange behaviour. The following is the code I am using.
import pennylane as qml
from pennylane import numpy as np
import time as time
import torch
import torch.nn as nn
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface = 'torch')
def simple_qubit_circuit(theta, inputs):
    qml.RX(inputs, wires=0)
    qml.RY(theta, wires=0)
    return qml.expval(qml.PauliZ(0))
class QNet(nn.Module):
    def __init__(self):
        super().__init__()
        shapes = {
            "theta": (1,)
        }
        self.q = qml.qnn.TorchLayer(simple_qubit_circuit, shapes)

    def forward(self, input_value):
        return self.q(input_value)
x_train = np.array([0.2, 0.1, 0.2, 0.14, 0.11, 0.41, 0.55, 0.3, 0.31, 0.6])
x_train = torch.tensor(x_train).reshape(10,1)
# Problem 1
# x_train = torch.rand(10,1)
# x_train = torch.atan(x_train)
model = QNet()
t1 = time.time()
out = model(x_train)
print("time taken for batch operations: ", time.time()-t1)
out2 = []
t2 = time.time()
for x in x_train:
    out2.append(model(x).item())
print("time taken for sequential operations: ", time.time()-t2)
print(out)
print(out2)
Problem 1: If I use a tensor created with the torch.rand() method, I get the following error. However, the error does not occur when I use a tensor created from a NumPy array. I am not sure why this happens. (A small dtype check is included after the traceback.)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_12504\1395714778.py in <cell line: 9>()
7 model = QNet()
8 t1 = time.time()
----> 9 out = model(x_train)
10 print("time taken for batch operations: ", time.time()-t1)
11 out2 = []
~\Miniconda3\envs\qns\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
~\AppData\Local\Temp\ipykernel_12504\4054581433.py in forward(self, input_value)
15
16 def forward(self, input_value):
---> 17 return self.q(input_value)
~\Miniconda3\envs\qns\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
~\Miniconda3\envs\qns\lib\site-packages\pennylane\qnn\torch.py in forward(self, inputs)
307 # recursively call the forward pass on each of the yielded tensors, and then stack the
308 # outputs back into the correct shape
--> 309 reconstructor = [self.forward(x) for x in torch.unbind(inputs)]
310 return torch.stack(reconstructor)
311
~\Miniconda3\envs\qns\lib\site-packages\pennylane\qnn\torch.py in <listcomp>(.0)
307 # recursively call the forward pass on each of the yielded tensors, and then stack the
308 # outputs back into the correct shape
--> 309 reconstructor = [self.forward(x) for x in torch.unbind(inputs)]
310 return torch.stack(reconstructor)
311
~\Miniconda3\envs\qns\lib\site-packages\pennylane\qnn\torch.py in forward(self, inputs)
311
312 # If the input is 1-dimensional, calculate the forward pass as usual
--> 313 return self._evaluate_qnode(inputs)
314
315 def _evaluate_qnode(self, x):
~\Miniconda3\envs\qns\lib\site-packages\pennylane\qnn\torch.py in _evaluate_qnode(self, x)
326 **{arg: weight.to(x) for arg, weight in self.qnode_weights.items()},
327 }
--> 328 return self.qnode(**kwargs).type(x.dtype)
329
330 def _init_weights(
~\Miniconda3\envs\qns\lib\site-packages\pennylane\qnode.py in __call__(self, *args, **kwargs)
845 return res
846
--> 847 res = qml.execute(
848 [self.tape],
849 device=self.device,
~\Miniconda3\envs\qns\lib\site-packages\pennylane\interfaces\execution.py in execute(tapes, device, gradient_fn, interface, mode, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform)
722 ) from e
723
--> 724 res = _execute(
725 tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode
726 )
~\Miniconda3\envs\qns\lib\site-packages\pennylane\interfaces\torch.py in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff, mode)
256 max_diff=max_diff,
257 )
--> 258 return ExecuteTapes.apply(kwargs, *parameters)
259
260
~\Miniconda3\envs\qns\lib\site-packages\pennylane\interfaces\torch.py in forward(ctx, kwargs, *parameters)
85
86 with qml.tape.Unwrap(*ctx.tapes):
---> 87 res, ctx.jacs = ctx.execute_fn(ctx.tapes, **ctx.gradient_kwargs)
88
89 # if any input tensor uses the GPU, the output should as well
~\Miniconda3\envs\qns\lib\site-packages\pennylane\interfaces\execution.py in wrapper(tapes, **kwargs)
204 else:
205 # execute all unique tapes that do not exist in the cache
--> 206 res = fn(execution_tapes.values(), **kwargs)
207
208 final_res = []
~\Miniconda3\envs\qns\lib\site-packages\pennylane\interfaces\execution.py in fn(tapes, **kwargs)
129 def fn(tapes: Sequence[QuantumTape], **kwargs): # pylint: disable=function-redefined
130 tapes = [expand_fn(tape) for tape in tapes]
--> 131 return original_fn(tapes, **kwargs)
132
133 @wraps(fn)
~\Miniconda3\envs\qns\lib\contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
~\Miniconda3\envs\qns\lib\site-packages\pennylane\_qubit_device.py in batch_execute(self, circuits)
654
655 # TODO: Insert control on value here
--> 656 res = self.execute(circuit)
657 results.append(res)
658
~\Miniconda3\envs\qns\lib\site-packages\pennylane\_qubit_device.py in execute(self, circuit, **kwargs)
434 # generate computational basis samples
435 if self.shots is not None or circuit.is_sampled:
--> 436 self._samples = self.generate_samples()
437
438 measurements = circuit.measurements
~\Miniconda3\envs\qns\lib\site-packages\pennylane\_qubit_device.py in generate_samples(self)
1216 rotated_prob = self.analytic_probability()
1217
-> 1218 samples = self.sample_basis_states(number_of_states, rotated_prob)
1219 return self.states_to_binary(samples, self.num_wires)
1220
~\Miniconda3\envs\qns\lib\site-packages\pennylane\_qubit_device.py in sample_basis_states(self, number_of_states, state_probability)
1244 # np.random.choice does not support broadcasting as needed here.
1245 return np.array(
-> 1246 [np.random.choice(basis_states, shots, p=prob) for prob in state_probability]
1247 )
1248
~\Miniconda3\envs\qns\lib\site-packages\pennylane\_qubit_device.py in <listcomp>(.0)
1244 # np.random.choice does not support broadcasting as needed here.
1245 return np.array(
-> 1246 [np.random.choice(basis_states, shots, p=prob) for prob in state_probability]
1247 )
1248
mtrand.pyx in numpy.random.mtrand.RandomState.choice()
ValueError: probabilities do not sum to 1
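My guess (unverified) is that this is a dtype issue: torch.rand() returns float32 by default, while the tensor built from the NumPy array above is float64, and the float32 probabilities may then fail the normalization check. This is the minimal check I put together, assuming the dtype really is the culprit:

# torch.rand() gives float32; the NumPy-based x_train above is float64
x_rand = torch.rand(10, 1)
print(x_rand.dtype)    # torch.float32
print(x_train.dtype)   # torch.float64

# possible workaround (untested assumption): build the random tensor in float64
x_rand64 = torch.rand(10, 1, dtype=torch.float64)
out_rand = model(x_rand64)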
Problem 2:
The time taken for the batch operation is higher than the time taken for the sequential operations. Is true parameter broadcasting not yet implemented in PennyLane for qml.qnn.TorchLayer()? (I sketched a quick timing check outside of TorchLayer below.)
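To see whether broadcasting happens at the device level at all, this sketch calls the QNode directly with a batched input, bypassing TorchLayer; I am assuming default.qubit's native parameter broadcasting applies to this direct call:

theta = torch.tensor(0.3, dtype=torch.float64)
xs = torch.linspace(0.0, 1.0, 200, dtype=torch.float64)

t = time.time()
batched = simple_qubit_circuit(theta, xs)   # single broadcast execution
print("direct batched call:   ", time.time() - t)

t = time.time()
looped = torch.stack([simple_qubit_circuit(theta, x) for x in xs])
print("one-by-one python loop:", time.time() - t)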
Problem 3:
The output tensors calculated with the batch operation and with the sequential operation, for the same input tensor and the same weight, are different, which should not be the case. Again, I am not sure what is wrong here. (My comparison check is sketched below.)
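For reference, this is the comparison I am using (assuming the shapes line up the way I expect); since the device has no shots set, I would expect it to print True:

# compare batched vs sequential results for the same trained weight
out_batch = model(x_train).detach().flatten()
out_seq = torch.tensor([model(x).item() for x in x_train], dtype=out_batch.dtype)
print(torch.allclose(out_batch, out_seq, atol=1e-6))
print(out_batch - out_seq)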
Output of qml.about()
qml.about()
Name: PennyLane
Version: 0.28.0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: c:\users\aksi01\miniconda3\envs\qns\lib\site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning, PennyLane-qiskit
Platform info: Windows-10-10.0.19045-SP0
Python version: 3.8.15
Numpy version: 1.22.3
Scipy version: 1.7.3
Installed devices:
- default.gaussian (PennyLane-0.28.0)
- default.mixed (PennyLane-0.28.0)
- default.qubit (PennyLane-0.28.0)
- default.qubit.autograd (PennyLane-0.28.0)
- default.qubit.jax (PennyLane-0.28.0)
- default.qubit.tf (PennyLane-0.28.0)
- default.qubit.torch (PennyLane-0.28.0)
- default.qutrit (PennyLane-0.28.0)
- null.qubit (PennyLane-0.28.0)
- lightning.qubit (PennyLane-Lightning-0.28.1)
- qiskit.aer (PennyLane-qiskit-0.29.0)
- qiskit.basicaer (PennyLane-qiskit-0.29.0)
- qiskit.ibmq (PennyLane-qiskit-0.29.0)
- qiskit.ibmq.circuit_runner (PennyLane-qiskit-0.29.0)
- qiskit.ibmq.sampler (PennyLane-qiskit-0.29.0)