I am trying to optimize a circuit using this loss function: $\left\langle \Psi(\boldsymbol{\theta}) \right| \hat{H} \left| \Psi(\boldsymbol{\theta}) \right\rangle + \beta \left| \left\langle \Psi(\boldsymbol{\theta}) \middle| \Psi_0 \right\rangle \right|^2$
I think it would make sense for the circuit to return qml.state()
rather than qml.expval(H),
because I need the state
to calculate the second term. However, PennyLane does not seem to support this. I receive:
ValueError: Computing the gradient of circuits that return the state with the parameter-shift rule gradient transform is not supported, as it is a hardware-compatible method.
Is there a way to circumvent this problem?
EDIT: Here I am using lightning.qubit,
but default.qubit
also didn't work.
Below is my code
import jax
import jax.numpy as jnp
import optax

import pennylane as qml
from pennylane import numpy as np
# Load the H2 dataset first: `qubits` must be known before the device is built
# (the original created the device on a line that referenced `qubits` before it
# was assigned, raising NameError).
h2_dataset = qml.data.load("qchem", molname="H2", bondlength=0.742, basis="STO-3G")
h2 = h2_dataset[0]
H, qubits = h2.hamiltonian, len(h2.hamiltonian.wires)

# default.qubit supports backprop differentiation, which is required to take
# gradients of a QNode that returns qml.state(); hardware-style methods
# (parameter-shift, adjoint on lightning.qubit) raise the reported ValueError.
dev = qml.device("default.qubit", wires=qubits)
@qml.qnode(dev)
def circuit(param):
    """Prepare |Psi(theta)> and return the full statevector.

    Args:
        param: rotation angle of the double-excitation gate (the single
            variational parameter theta).

    Returns:
        The simulator statevector, so the caller can form both the energy
        term and the overlap term of the loss.

    NOTE(review): a QNode returning qml.state() is only differentiable with a
    backprop-capable simulator (e.g. default.qubit); parameter-shift raises
    the ValueError quoted above.
    """
    # Start from the Hartree-Fock reference determinant of H2.
    qml.BasisState(h2.hf_state, wires=range(qubits))
    # One double excitation acting on all four qubits.
    qml.DoubleExcitation(param, wires=[0, 1, 2, 3])
    return qml.state()
def loss_fn_1(theta):
    """Energy expectation <Psi(theta)|H|Psi(theta)> computed from the state.

    Args:
        theta: variational parameter passed through to ``circuit``.

    Returns:
        The (real) energy in Hartree.

    The original called an undefined ``hamiltonian_to_matrix``; PennyLane
    already provides ``qml.matrix`` for this. The expectation value of a
    Hermitian operator is real, and taking the real part is also required for
    ``jax.grad``, which only differentiates real scalar outputs.
    """
    state = circuit(theta)
    h_matrix = qml.matrix(H, wire_order=H.wires)
    return (state.conj().T @ h_matrix @ state).real
# Initial parameter as a JAX array so jax.grad / optax can trace and update it
# (the original used pennylane.numpy, which the JAX machinery does not track).
theta = jnp.array(0.0)

# Histories of the cost function and the circuit parameter.
energy = [loss_fn_1(theta)]
angle = [theta]

conv_tol = 1e-6      # stop once the energy change per step falls below this
max_iterations = 100

opt = optax.sgd(learning_rate=0.4)
opt_state = opt.init(theta)

for n in range(max_iterations):
    # Gradient of the loss w.r.t. theta via JAX autodiff (backprop through the QNode).
    gradient = jax.grad(loss_fn_1)(theta)
    updates, opt_state = opt.update(gradient, opt_state)
    theta = optax.apply_updates(theta, updates)

    angle.append(theta)
    energy.append(loss_fn_1(theta))

    # Convergence check must run every iteration, inside the loop
    # (the pasted original had lost the loop indentation here).
    conv = np.abs(energy[-1] - energy[-2])

    if n % 2 == 0:
        print(f"Step = {n}, Energy = {energy[-1]:.8f} Ha")

    if conv <= conv_tol:
        break