Hello! I have been running an optimization with gradient descent. Evaluating the cost function on its own gives no error, but inside the optimization loop I ran into "Unsupported type in the model: <class 'autograd.builtins.SequenceBox'>", raised from unflatten.
import pennylane as qml
from pennylane import numpy as np

def xyzyz_layer(params, n_qubits):  # params is a matrix of shape (n_qubits, 5)
    # first Rx layer
    for i in range(n_qubits):
        qml.RX(params[i, 0], wires=i)
    # chained c-Ry layer
    if n_qubits > 1:
        for i in range(n_qubits):
            if i + 1 < n_qubits:
                ctrl_idx = i + 1
            else:
                ctrl_idx = 0
            qml.CRY(params[i, 1], wires=[ctrl_idx, i])
    # Rz layer
    for i in range(n_qubits):
        qml.RZ(params[i, 2], wires=i)
    # second chained c-Ry layer
    if n_qubits > 1:
        for i in range(n_qubits):
            if i + 1 < n_qubits:
                ctrl_idx = i + 1
            else:
                ctrl_idx = 0
            qml.CRY(params[i, 3], wires=[ctrl_idx, i])
    # final Rz layer
    for i in range(n_qubits):
        qml.RZ(params[i, 4], wires=i)
def xyzy_layer(params, n_qubits):  # params is a matrix of shape (n_qubits, 4)
    # first Rx layer
    for i in range(n_qubits):
        qml.RX(params[i, 0], wires=i)
    # chained c-Ry layer
    if n_qubits > 1:
        for i in range(n_qubits):
            if i + 1 < n_qubits:
                ctrl_idx = i + 1
            else:
                ctrl_idx = 0
            qml.CRY(params[i, 1], wires=[ctrl_idx, i])
    # Rz layer
    for i in range(n_qubits):
        qml.RZ(params[i, 2], wires=i)
    # second chained c-Ry layer
    if n_qubits > 1:
        for i in range(n_qubits):
            if i + 1 < n_qubits:
                ctrl_idx = i + 1
            else:
                ctrl_idx = 0
            qml.CRY(params[i, 3], wires=[ctrl_idx, i])
    return
def encoder_ansatz(params, n_qubits):  # for now, the encoder is just one xyzyz layer
    xyzyz_layer(params, n_qubits)
    return

def decoder_ansatz(params, n_qubits):  # for now, the decoder is just one xyzy layer
    xyzy_layer(params, n_qubits)
    return

def accumulate_signal(phi, n_probes):  # assume qubits 0 .. n_probes-1 are the probes
    for i in range(n_probes):
        qml.RZ(phi, wires=i)
n_probes = 2
n_ancillas = 1
n_qubits = n_probes + n_ancillas

dev = qml.device('default.qubit', analytic=True, wires=n_probes + n_ancillas)

@qml.qnode(dev)
def measure_full_sys(phi, params):
    enc = params[:5 * n_qubits].reshape(n_qubits, 5)
    dec = params[-4 * n_qubits:].reshape(n_qubits, 4)
    encoder_ansatz(enc, n_qubits)
    accumulate_signal(phi, n_probes)
    decoder_ansatz(dec, n_qubits)
    return qml.probs(wires=range(n_qubits))
def CFI_full(phi, params):
    probs = measure_full_sys(phi, params)               # length 2**n_qubits
    grad_phi = qml.jacobian(measure_full_sys, argnum=0)
    grad_val = grad_phi(phi, params)                    # length 2**n_qubits
    cfi = np.sum(grad_val**2 / probs)                   # each term is grad^2 / prob
    return cfi

def avg_CFI_full(params):
    phi_vec = [0.1]
    avg_cfi = 0
    for i in range(len(phi_vec)):
        avg_cfi += CFI_full(phi_vec[i], params)
    avg_cfi /= len(phi_vec)
    return -avg_cfi  # negated so that minimizing the cost maximizes the CFI
steps = 200
params_init = np.random.rand(n_qubits * 9)  # 5*n_qubits encoder params + 4*n_qubits decoder params
gd_cost = []
opt = qml.RMSPropOptimizer(0.01)
theta = params_init
for _ in range(steps):
    theta = opt.step(avg_CFI_full, theta)
    gd_cost.append(avg_CFI_full(theta))
The error I am getting comes from the Jacobian calculation in my cost function (note that even without the optimization, the cost function already includes a qml.jacobian evaluation): somewhere along the way _flatten is called, and that throws the "unsupported type SequenceBox" error. However, when I evaluate the cost function on its own, without doing gradient descent, there is no error.
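If I understand correctly, opt.step(avg_CFI_full, theta) essentially just evaluates the autograd gradient of the cost, so a minimal sketch of what fails (under that assumption) would be differentiating a cost that itself calls qml.jacobian, i.e. a second-order derivative through the QNode:

# Minimal sketch of what I believe opt.step does internally:
# take the gradient of avg_CFI_full, which already contains a qml.jacobian call.
theta = params_init
cost_grad = qml.grad(avg_CFI_full, argnum=0)

print(avg_CFI_full(theta))  # plain evaluation of the cost works for me
print(cost_grad(theta))     # this is where I would expect the same SequenceBox error

Is taking the gradient of a cost function that itself contains a qml.jacobian call supported, or is there a recommended way to compute the CFI inside a trainable cost?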