Hello! I was looking forward to running some tests executing the same kernel on different backends. The purpose is to compare the ideal performance of a quantum feature extractor to noisy simulations on simulated QPUs. I don’t really want to run code on simulators (like the ones available on AWS, Rigetti cloud etc…), I was mostly looking into injecting noise using “ready-made” real QPU noise profiles in my kernel to conduct a systematic analysis without having to potentially pay. Ideally, I would like to treat the backends as hyperparameters, reusing the same code with different simulated noisy backends and setting them using config files.
The dummy kernel I have coded is meant to encode channel information from an EEG using amplitude encoding and iteratively encode timestep information after applying a strongly entangling template with random weights. The EEG is cut in small sliding windows, which should be executed in parallel batches just like we would do in PyTorch. Here’s the code:
import logging
import os
from math import ceil, log2, pi

import pennylane as qml
import torch
# Logging configuration
# Root logger at INFO so the shape-tracing messages below are visible;
# module-level logger named after the module, per stdlib convention.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# 1. Pre-processing and Post-processing Functions (Data Logic Side)
# -----------------------------------------------------------------------------
def preprocess_data(raw_eeg_batch: torch.Tensor, sliding_window_length: int) -> tuple[torch.Tensor, int, int]:
    """
    Trim an EEG batch to a whole number of windows and flatten it for the QNode.

    Args:
        raw_eeg_batch: Tensor of shape [batch, timesteps, channels].
        sliding_window_length: Number of timesteps in each sliding window.

    Returns:
        A tuple ``(circuit_input, batch_size, n_windows)`` where
        ``circuit_input`` has shape [batch * n_windows, window_len, channels].
    """
    batch_size, total_timesteps, n_channels = raw_eeg_batch.shape
    log.info(f"Preprocessing: Input shape {raw_eeg_batch.shape}")

    # How many complete windows fit, and how many trailing timesteps are left over.
    n_windows, timesteps_to_trim = divmod(total_timesteps, sliding_window_length)

    # Drop the trailing timesteps that do not fill a complete window.
    if timesteps_to_trim:
        trimmed_tensor = raw_eeg_batch[:, :total_timesteps - timesteps_to_trim, :]
    else:
        trimmed_tensor = raw_eeg_batch

    # [Batch, N_Windows, Window_Len, Channels] -> [Batch * N_Windows, Window_Len, Channels];
    # the flattened tensor is what enters the quantum circuit.
    windowed = trimmed_tensor.view(batch_size, n_windows, sliding_window_length, n_channels)
    circuit_input = windowed.reshape(-1, sliding_window_length, n_channels)

    log.info(f"Preprocessing: Circuit input shape {circuit_input.shape}")
    return circuit_input, batch_size, n_windows
def postprocess_data(qnode_output: torch.Tensor, batch_size: int, n_windows: int, n_qubits: int) -> torch.Tensor:
    """
    Fold the flattened QNode output back into [batch, n_windows, n_qubits].

    Depending on the PennyLane version/configuration, the Torch interface may
    return either a single tensor or a list with one tensor per measured qubit;
    both forms are handled here.
    """
    if isinstance(qnode_output, list):
        # One tensor per qubit: stack to [n_qubits, total_batch], then flip axes.
        transposed_output = torch.stack(qnode_output).T
    else:
        # Already a single tensor.
        transposed_output = qnode_output

    log.info(f"Postprocessing: Raw output shape {transposed_output.shape}")

    # Final reshape to [Batch, N_Windows, Features (Qubits)].
    reassembled_tensor = transposed_output.reshape(batch_size, n_windows, n_qubits)
    log.info(f"Postprocessing: Final shape {reassembled_tensor.shape}")
    return reassembled_tensor
# -----------------------------------------------------------------------------
# 2. Quantum Kernel (Pure Circuit Logic)
# -----------------------------------------------------------------------------
class AmplitudeReUploadKernel:
    """
    Amplitude-encoding + data re-uploading quantum feature extractor.

    Holds the (fixed, random) variational weights, the PennyLane device, and
    the QNode. The backend is exposed as a constructor parameter so it can be
    treated as a hyperparameter and driven from a config file; the defaults
    reproduce the original behavior (IonQ simulator, 1000 shots).
    """

    def __init__(self, n_channels, sliding_window_length,
                 device_name="ionq.simulator", shots=1000, **device_kwargs):
        """
        Args:
            n_channels: Number of EEG channels encoded into the state amplitudes.
            sliding_window_length: Timesteps per window (= re-uploading steps).
            device_name: PennyLane device short name (backend hyperparameter).
            shots: Number of shots forwarded to the device.
            **device_kwargs: Extra keyword options forwarded verbatim to
                qml.device (e.g. noise-model settings on noisy simulators).
        """
        self.n_channels = n_channels
        # ceil, not round: for a non-power-of-2 channel count we need enough
        # qubits to hold every amplitude (round(log2(5)) == 2 would give only
        # 4 amplitudes for 5 channels). AmplitudeEmbedding pads the remainder.
        self.n_qubits = ceil(log2(n_channels))
        self.sliding_window_length = sliding_window_length
        # Random weights for the parametrized layers.
        # Shape: [Window_Len, Layers=1, Qubits, 3 rotation angles].
        self.weights = torch.rand(self.sliding_window_length, 1, self.n_qubits, 3) * 2 * pi
        # Backend configuration, kept on the instance for reproducibility/logging.
        self.device_name = device_name
        self.shots = shots
        self.device_kwargs = device_kwargs
        # Device setup
        self._setup_device()
        # QNode setup
        self.qnode = self._create_qnode()

    def _setup_device(self):
        # Wire count derives from n_qubits (was hard-coded to 6, which only
        # coincidentally matched 64 channels); the backend itself is configurable.
        self.dev = qml.device(self.device_name, wires=self.n_qubits,
                              shots=self.shots, **self.device_kwargs)

    def _create_qnode(self):
        @qml.qnode(self.dev, interface="torch")
        def circuit(inputs, weights):
            """
            inputs shape: [batch_size, sliding_window_length, n_channels]
            The batch dimension is 'implicitly' broadcast by PennyLane/Torch;
            the time dimension is consumed by the re-uploading loop.
            """
            # Data Re-uploading Loop
            for t in range(self.sliding_window_length):
                # 1. Encoding: t-th time slice for the whole batch goes into
                #    the amplitudes (normalized, zero-padded if needed).
                qml.AmplitudeEmbedding(features=inputs[:, t, :], wires=range(self.n_qubits),
                                       normalize=True, pad_with=0.)
                # 2. Processing: trainable entangling layer between uploads.
                qml.StronglyEntanglingLayers(weights[t], wires=range(self.n_qubits))
            return [qml.expval(qml.PauliZ(i)) for i in range(self.n_qubits)]

        # Expand broadcasted (batched) inputs into one tape per sample, for
        # devices that lack native parameter broadcasting.
        return qml.transforms.broadcast_expand(circuit)

    def forward(self, x):
        """Run the QNode on a flattened window batch; returns PauliZ expectations."""
        return self.qnode(x, self.weights)
# -----------------------------------------------------------------------------
# 3. Main Execution Workflow
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Configuration (would come from a config file in real experiments) ---
    CONFIG = {
        'n_channels': 64,
        'sliding_window_length': 4,
        'batch_size': 16,  # Realistic batch size
        'signal_len': 128,  # Time signal length
    }

    # 1. Dummy EEG data: [Batch, Time, Channels]
    raw_eeg = torch.rand(CONFIG['batch_size'], CONFIG['signal_len'], CONFIG['n_channels'])

    # 2. Window and flatten the signal for the circuit.
    processed_input, original_bs, n_wins = preprocess_data(raw_eeg, CONFIG['sliding_window_length'])

    # 3. Build the kernel (weights + device + QNode).
    kernel = AmplitudeReUploadKernel(
        n_channels=CONFIG['n_channels'],
        sliding_window_length=CONFIG['sliding_window_length'],
    )

    # 4. Execute the circuit — this is the part to benchmark.
    log.info("Starting quantum circuit execution...")
    quantum_output = kernel.forward(processed_input)

    # 5. Reassemble the per-window features into [Batch, Windows, Qubits].
    final_output = postprocess_data(
        quantum_output,
        batch_size=original_bs,
        n_windows=n_wins,
        n_qubits=kernel.n_qubits,
    )

    print(f"\nFinal Result:")
    print(f"Input Shape: {raw_eeg.shape}")
    print(f"Output Shape: {final_output.shape} (Batch, Windows, Qubits)")
In this case, I was testing the IonQ plugin to see whether it worked. It seems to be working, but if I am right there is no way to make this simulation noisy, am I right?
I’ve tried Rigetti too, but I have seen that it has been deprecated since version 0.40.0. I have attached the error anyway. The code is the same; the only different thing is of course the device, which I set to self.dev = qml.device("rigetti.qvm", device="6q", noisy=True)
Traceback (most recent call last):
File "/home/lollo/Desktop/eeg-attencion/backends/rigetti.py", line 151, in <module>
kernel = AmplitudeReUploadKernel(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lollo/Desktop/eeg-attencion/backends/rigetti.py", line 90, in __init__
self._setup_device()
File "/home/lollo/Desktop/eeg-attencion/backends/rigetti.py", line 96, in _setup_device
self.dev = qml.device("rigetti.qvm", device="6q", noisy=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lollo/Desktop/eeg-attencion/backends/.venv/lib/python3.11/site-packages/pennylane/devices/device_constructor.py", line 244, in device
plugin_device_class = plugin_devices[name].load()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lollo/.local/share/uv/python/cpython-3.11.13-linux-x86_64-gnu/lib/python3.11/importlib/metadata/__init__.py", line 202, in load
module = import_module(match.group('module'))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lollo/.local/share/uv/python/cpython-3.11.13-linux-x86_64-gnu/lib/python3.11/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/home/lollo/Desktop/eeg-attencion/backends/.venv/lib/python3.11/site-packages/pennylane_rigetti/__init__.py", line 7, in <module>
from .qpu import QPUDevice
File "/home/lollo/Desktop/eeg-attencion/backends/.venv/lib/python3.11/site-packages/pennylane_rigetti/qpu.py", line 26, in <module>
from pennylane.measurements import Expectation
ImportError: cannot import name 'Expectation' from 'pennylane.measurements' (/home/lollo/Desktop/eeg-attencion/backends/.venv/lib/python3.11/site-packages/pennylane/measurements/__init__.py)
I’m on Pennylane 0.43.1
Name: pennylane
Version: 0.43.1
Location: /home/lollo/Desktop/eeg-attencion/backends/.venv/lib/python3.11/site-packages
Requires: appdirs, autograd, autoray, cachetools, diastatic-malt, networkx, numpy, packaging, pennylane-lightning, requests, rustworkx, scipy, tomlkit, typing-extensions
Required-by: pennylane-ionq, pennylane-lightning, pennylane-rigetti
I’m planning to study the docs for pennylane-qiskit and pytket-pennylane (for Quantinuum QPU simulations) too, but right now I’m a bit swamped. Am I on the right track, or is there something I’m missing?
Thank you for your help!
EDIT: I have removed strawberry fields as it is stated that it is not compatible with my Pennylane version.