Hello,
I’m trying to build a neural network layer, similar to the paper [1806.06871] Continuous-variable quantum neural networks for the sine fitting example. But after getting rid of all problems, it seems there is no gradient, which i do not understand. I already looked up various examples from SF demos and applied them, but I#d be very grateful for help. Here is what I’ve done:
batch_size = 10
#dataset
X = np.linspace(-1.0, 1.0, num=batch_size)
Y = np.sin(X)
X = tf.convert_to_tensor(X)
Y = tf.convert_to_tensor(Y)
prog = sf.Program(1)
cutoff = 10
# create symbolic parameter for data input displacement
alpha_d = prog.params("alpha_d")
# create symbolic ones for layer
theta_1a, theta_1b , s_1, alpha_1, kappa_1= prog.params("theta_1a", "theta_1b", "s_1", "alpha_1","kappa_1")
with prog.context as q:
# States
Dgate(alpha_d) | q[0]
# Gates
Rgate(theta_1a) | q[0]
Sgate(s_1) | q[0]
Rgate(theta_1b) | q[0]
Dgate(alpha_1) | q[0]
Kgate(kappa_1)| q[0]
# Measurements
MeasureHomodyne(0.0) | q[0]
eng = sf.Engine(backend="tf", backend_options={"cutoff_dim": cutoff, "batch_size":batch_size})
opt = tf.keras.optimizers.Adam(learning_rate=0.1)
steps = 50
theta_1a = tf.Variable(1.0)
theta_1b = tf.Variable(1.0)
s_1 = tf.Variable(1.0)
alpha_1 = tf.Variable(1.0)
kappa_1 = tf.Variable(0.1)
mapping = {"alpha_d": X,
"theta_1a": theta_1a, "theta_1b": theta_1b, "s_1": s_1,
"alpha_1": alpha_1, "kappa_1": kappa_1}
def loss():
# This is the Least-Squares-Loss
# reset the engine if it has already been executed
if eng.run_progs:
eng.reset()
# execute the engine
results = eng.run(prog, args=mapping)
results = tf.reshape(results.samples,[batch_size])
print(results)
loss = 0
for l, p in zip(Y, results):
print(l,p, loss)
loss = loss + (l - p) ** 2
loss = loss / len(Y)
return loss
for step in range(steps):
_ = opt.minimize(loss, [theta_1a, theta_1b , s_1, alpha_1, kappa_1])
parameter_vals = [theta_1a.numpy(), theta_1b.numpy() , s_1.numpy(), alpha_1.numpy(), kappa_1.numpy()]
print("Parameter values at step {}: {}".format(step, parameter_vals))
eng.reset()
The error message is:
ValueError: No gradients provided for any variable: (['Variable:0', 'Variable:0', 'Variable:0', 'Variable:0', 'Variable:0'],). Provided
grads_and_vars is ((None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>), (None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>), (None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>), (None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>), (None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.1>)).