Hello everyone,
I am trying to optimize a circuit to learn the GHZ state via the Quantum Earth Mover’s distance as defined in [2101.03037] Learning quantum data with the quantum Earth Mover's distance
(See section 2 for the presentation of the toy model and Appendix F for computational details).
However, I get the No gradients provided for any variable: (['Variable:0'],)
error when trying to optimize the circuit.
I suppose this might have something to do with the fact that the circuit is called when computing the distance, but I’m not sure.
Here’s the notebook for the code: quantum.py (3.5 KB)
Any help would be greatly appreciated!
Here is the code for the circuit:
@qml.qnode(dev, interface="tf", diff_method="backprop" )
def toy(op, params, H):
qml.RX(params[0], wires = 0)
qml.broadcast(qml.CRX, wires = range(n), pattern = "chain", parameters = params[1:n])
if op == 'state':
return qml.state()
else:
return qml.expval(H)
and here is the code for the GHZ state:
@qml.qnode(dev, interface="tf", diff_method="backprop" )
def GHZ(op,H):
qml.Hadamard(wires=0)
qml.broadcast(qml.CNOT, wires = range(n), pattern = "chain")
if op == 'state':
return qml.state()
else:
return qml.expval(H)
Here is the code for the QEM distance:
def exp(params, H):
a = toy('exp',params,H)
b = GHZ('exp',H)
diff = tf.abs(a - b)
return diff
def tensor_prod(k):
if k == 0:
prod = qml.PauliY(0)
else:
prod = qml.PauliX(0)
for i in range(1,k):
prod = prod @ qml.PauliX(i)
if k % 2 == 0:
prod = prod @ qml.PauliY(k)
else:
prod = prod @ qml.PauliX(k)
return prod
def dem(params):
expz = np.zeros(n)
values = np.zeros(n+1)
for i in range(n):
expz[i] = exp(params,qml.PauliZ(i))
expz = tf.cast(expz, tf.float32)
values[0] = tf.math.reduce_sum(expz)
for i in range(1,n):
values[i] = exp(params,tensor_prod(i))
dist = tf.keras.backend.max(values)
dist = tf.cast(dist, tf.float32)
return dist
And here is the code for the training:
x = tf.Variable(np.random.uniform(0,2*np.pi,n))
opt_dem = tf.keras.optimizers.Adam(learning_rate=0.2)
step = 30
for i in range(step):
with tf.GradientTape() as tape:
loss = dem(x)
vars = [x]
gradients = tape.gradient(loss, vars)
opt_dem.apply_gradients(zip(gradients, vars))