Hello!
I’m using the recent GQSP functionality to implement Hamiltonian simulation of a qubitized operator (cf. Corollary 8 of https://arxiv.org/pdf/2308.01501). To the best of my understanding, the reference construction is to approximate the function e^{-i \alpha t \cos\theta}, equivalently e^{-i \alpha t (z + z^{-1})/2} with z = e^{i\theta}, by a Laurent polynomial L_d(z) = \sum_{n=-d}^{d} c_n z^n. Since GQSP works with ordinary polynomials, this quantity must be shifted by z^d, giving P_d(z) = z^d L_d(z), which can later be compensated for by multiplying by the corresponding power of the walk operator.
The approximation error of the shifted Laurent polynomial decreases as the degree increases. However, the realization error (in the sense of the operator-norm difference between the actual unitary block implemented by GQSP \mathrm{GQSP}(W) and the expected truncated shifted Laurent polynomial P_d(W)) exhibits a non-monotonic dependence on the degree: it first decreases up to about degree 20, and then increases rapidly to unusable values.
The issue might be related to the polynomial admissibility condition (e.g. Issue with qml.poly_to_angles(poly, "GQSP") ), although I have checked that the realized polynomial on the relevant arc is bounded away from \pm 1.
This is my configuration:
import sys
import pennylane as qml
import numpy as np
import scipy as sc
import matplotlib
import matplotlib.pyplot as plt
# Environment report: each version string is right-aligned to a 50-char field.
print(f"Python version: {sys.version:>50s}")
print(f"Pennylane version: {qml.__version__:>50s}")
print(f"Numpy version: {np.__version__:>50s}")
print(f"Scipy version: {sc.__version__:>50s}")
# Space added after ':' for consistency with the other labels above.
print(f"matplotlib version: {matplotlib.__version__:>50s}")
# Python version: 3.14.4 (main, Apr 8 2026, 09:50:55) [GCC 11.4.0]
# Pennylane version: 0.44.1
# Numpy version: 2.4.4
# Scipy version: 1.17.1
# matplotlib version: 3.10.8
The coefficients and angles are computed from the Bessel-function expansion. If the coefficients lead to a polynomial that violates the GQSP boundedness condition, poly_to_angles raises a ValueError ("setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (2,) + inhomogeneous part"), which requires scaling down the magnitude of the coefficients.
def _get_hamiltonian_simulation_coeffs_and_angles(
    deg: int, alpha: float, t: float, retry: int = 1_000_000
):
    """
    Build the shifted Laurent truncation for qubitized Hamiltonian simulation
    and its GQSP angles.

    Via the Jacobi-Anger expansion, e^{-i beta cos(theta)} is truncated to
        L_d(z) = sum_{n=-d}^{d} (-i)^n J_n(beta) z^n,   beta = alpha * t,
    and then shifted by z^d so GQSP sees an ordinary (non-Laurent) polynomial
        P_d(z) = z^d L_d(z) = sum_{m=0}^{2d} c_m z^m,
        c_m = (-i)^(m-d) J_{m-d}(beta).

    :param deg: degree d of the Laurent truncation (P_d has degree 2d)
    :param alpha: 1-norm of the Hamiltonian
    :param t: hamiltonian simulation time
    :param retry: number of times you can make the coefficient smaller to
        compensate GQSP constraints (each attempt shrinks by 1 - 1e-6)
    :return: tuple ``(coeffs, angles)`` with the (possibly rescaled)
        shifted coefficients and the corresponding GQSP angles
    :raises ValueError: for invalid ``deg``, ``retry``, ``alpha`` or ``t``
    :raises RuntimeError: if coefficients are non-finite, the computed
        angles are invalid, or no admissible rescaling is found in
        ``retry`` attempts
    """
    if deg < 0:
        raise ValueError(f"`deg` must be non-negative, got {deg}.")
    if retry < 1:
        raise ValueError(f"`retry` must be at least 1, got {retry}.")
    if not np.isfinite(alpha):
        raise ValueError(f"`alpha` must be finite, got {alpha}.")
    if not np.isfinite(t):
        raise ValueError(f"`t` must be finite, got {t}.")
    beta = alpha * t
    # Bessel orders n = -d .. d; (-1j)**n handles negative integer powers fine.
    orders = np.arange(-deg, deg + 1)
    coeffs = np.array(
        [(-1j) ** k * sc.special.jv(k, beta) for k in orders], dtype=complex
    )
    if coeffs.shape != (2 * deg + 1,):
        raise RuntimeError(f"Internal error: expected {2 * deg + 1} coefficients, got shape {coeffs.shape}.")
    if not np.all(np.isfinite(coeffs)):
        raise RuntimeError("Initial Laurent coefficients contain non-finite values.")
    for _ in range(retry):
        try:
            angles = qml.poly_to_angles(coeffs, "GQSP")
        except (AssertionError, ValueError):
            # Polynomial violates the GQSP boundedness condition: shrink it
            # slightly and try again.
            coeffs *= 1 - 1e-6
            continue
        if angles is None or not np.all(np.isfinite(angles)):
            raise RuntimeError("Computed GQSP angles are invalid.")
        return coeffs, angles
    raise RuntimeError(f"The polynomial at deg {deg} is not numerically compatible with GQSP after {retry} retries.")
The rest of the code is a standard implementation of GQSP applied to the qubitized walk operator for the Hamiltonian H = Z + X. The total error is upper-bounded by the sum of the polynomial approximation error and the error in realizing that polynomial with GQSP.
def _close_up_to_phase(A, B):
"""
Check if the matrices are close up to a harmless global phase.
"""
overlap = np.vdot(A.ravel(), B.ravel())
phase = 1.0 if np.isclose(overlap, 0.0) else overlap / np.abs(overlap)
return np.allclose(A, phase * B, atol=1e-8)
def error_breakdown_on_gqsp_qubitized_hsim(d: int):
    """Compute approximation, realization, and total operator-norm errors.

    :param d: degree of the Laurent truncation (the shifted polynomial
        realized by GQSP has degree 2d)
    :return: dict with keys ``approximation_error`` (||P(W) - target||),
        ``realization_error`` (||GQSP block - P(W)||) and ``total_error``
        (||GQSP block - target||), all in spectral norm
    """
    # The circuit has three qubits
    gqsp_ctrl = 0
    walk_ctrl = 1
    sys_wire = 2
    # H = Z + X is the Hamiltonian acting on the system qubit
    H = qml.dot([1.0, 1.0], [qml.Z(sys_wire), qml.X(sys_wire)])
    # For H = Z + X, the LCU 1-norm is alpha = |1| + |1| = 2.
    alpha = 2.0
    # Qubitized walk operator W
    walk_op = qml.Qubitization(H, control=[walk_ctrl])
    W = qml.matrix(walk_op, wire_order=[walk_ctrl, sys_wire])
    # Fixed time for the Hamiltonian-simulation polynomial
    tau = 0.5
    # Get the actually realized polynomial coefficients and GQSP angles
    coeffs, angles = _get_hamiltonian_simulation_coeffs_and_angles(d, alpha=alpha, t=tau)
    # Full 8x8 GQSP unitary on [gqsp_ctrl, walk_ctrl, sys_wire]
    U_gqsp = qml.matrix(
        qml.GQSP(walk_op, angles, control=gqsp_ctrl),
        wire_order=[gqsp_ctrl, walk_ctrl, sys_wire],
    )
    # The |0><0| block of the extra GQSP control qubit.
    # Since W acts on 2 qubits, this block is the top-left 4x4 corner.
    U_gqsp_ketbra0 = U_gqsp[:4, :4]
    # Sanity check: make sure that the [:4, :4] block is exactly the one we want
    # use P(x) = 0.5 * (1 + x), so the GQSP block should reproduce 0.5 * (I + W) rather than W itself.
    sanity_coeffs = np.array([1.0, 1.0], dtype=complex)
    sanity_scale = 0.5
    sanity_angles = qml.poly_to_angles(sanity_scale * sanity_coeffs, "GQSP")
    sanity_U_gqsp = qml.matrix(
        qml.GQSP(walk_op, sanity_angles, control=gqsp_ctrl),
        wire_order=[gqsp_ctrl, walk_ctrl, sys_wire],
    )
    sanity_U_gqsp_ketbra0 = sanity_U_gqsp[:4, :4]
    sanity_target = sanity_scale * (np.eye(W.shape[0], dtype=complex) + W)
    assert _close_up_to_phase(sanity_U_gqsp_ketbra0, sanity_target), "Wrong qubit order or wrong block extraction."
    # Polynomial realized by GQSP in the top-left block sum_i c_i W^i.
    # Because coeffs are the shifted Laurent coefficients, this equals W^d L_d(W).
    P_of_W = sum(c * np.linalg.matrix_power(W, n) for n, c in enumerate(coeffs))
    # Correct shifted target for qubitized Hamiltonian simulation:
    # W^d exp[-i alpha tau (W + W^\dagger)/2]
    # (alpha = 2.0 was already fixed above; no need to reassign it here.)
    target = np.linalg.matrix_power(W, d) @ sc.linalg.expm(
        -1j * alpha * tau * (W + W.conj().T) / 2
    )
    # Error decomposition (spectral norm, ord=2)
    approximation_error = np.linalg.norm(P_of_W - target, ord=2)
    realization_error = np.linalg.norm(U_gqsp_ketbra0 - P_of_W, ord=2)
    total_error = np.linalg.norm(U_gqsp_ketbra0 - target, ord=2)
    return {
        "approximation_error": approximation_error,
        "realization_error": realization_error,
        "total_error": total_error
    }
Plotting these quantities as functions of the degree d produces the error curves shown at the end.
# Sweep even degrees only (the Laurent construction is symmetric in n).
degrees = np.arange(2, 31, 2)
# Collect the full breakdown per degree, then project out each error series.
breakdowns = [error_breakdown_on_gqsp_qubitized_hsim(d) for d in degrees]
approx_errors = [b["approximation_error"] for b in breakdowns]
realization_errors = [b["realization_error"] for b in breakdowns]
total_errors = [b["total_error"] for b in breakdowns]
# Log-scale comparison of the three error curves versus polynomial degree.
plt.figure(figsize=(7, 4.5))
plt.plot(degrees, total_errors, color="black", linewidth=2.5, label="total")
plt.plot(degrees, realization_errors, ':', color="red", linewidth=2.0, label="realization")
plt.plot(degrees, approx_errors, ':', color="blue", linewidth=2.0, label="approximation")
plt.yscale("log")
plt.xlabel("degree")
plt.ylabel("error")
plt.title("GQSP qubitized Hamiltonian simulation error breakdown")
plt.grid(True, which="both", alpha=0.3)
plt.legend()
plt.tight_layout()
plt.show()
It results in:
Following the advice in "Issue with qml.poly_to_angles(poly, "GQSP")", a minor modification of the method _get_hamiltonian_simulation_coeffs_and_angles to:
n = np.arange(-deg, deg + 1)
coeffs = np.array([(-1j) ** k * sc.special.jv(k, beta) for k in n], dtype=complex)
coeffs -= 1e-17 # <-- added!
does not decrease the overall error below ~1e-11, even as d increases:
Is this behavior expected, or am I missing a piece of the construction here?
Thank you very much for your help.


