Been using Pennylane for a optimization task and seem to be getting the following error: UserWarning: Output seems independent of input.
I have checked various things on how the loss is being calculated and if all examples are being passed in correctly but cant seem to find the issue. I have pasted my code file below - cant attach it since I am a new user. Thanks a lot for taking a look at this.
import pennylane as qml
from pennylane import numpy as np
from sklearn.metrics import log_loss
from pennylane.optimize import AdamOptimizer
#set parameters of the model
n_qubits = 8
n_features = 16
n_parameters = 15
feature_range = (0, 2*np.pi)
train_size = 150
test_size = 150
qubits = list(range(n_qubits))
def initialize_parameters(min_range, max_range, n_parameters):
params = np.random.uniform(low = min_range, high = max_range, size = n_parameters)
return params
#Import the data which I have ommitted
x_train = np.random.uniform(size = (150,16))
x_test = np.random.uniform(size = (150,16))
y_train = np.array([0]*75 + [1]*75)
y_test = np.array([0]*75 + [1]*75)
dev = qml.device("default.qubit", wires=8)
@qml.qnode(dev)
def qnn(data, theta):
#data encoding
for i in range((n_features // 2)):
qml.RX(data[i], wires = i)
for i in range((n_features // 2)):
qml.RY(data[i+n_qubits], wires = i)
#variational
theta_counter = 0
for i, q in enumerate(qubits, start = theta_counter):
qml.RX(theta[i], wires = q)
theta_counter = i
for q1, q2 in zip(qubits[0::2], qubits[1::2]):
qml.CZ(wires=[q1,q2])
for i, q in enumerate(qubits[1::2], start = theta_counter+1):
qml.RY(theta[i], wires = q)
theta_counter = i
qml.CZ(wires = [1, 3])
qml.CZ(wires = [5, 7])
for i, q in enumerate(qubits[3::4], start = theta_counter+1):
qml.RX(theta[i], wires = q)
theta_counter = i
qml.CZ(wires=[3, 7])
for i, q in enumerate(qubits[7::8], start = theta_counter+1):
qml.RY(theta[i], wires=q)
theta_counter = i
return qml.expval(qml.PauliZ(7))
def compute_cost(params, x, y):
y_pred = [qnn(x[i], params) for i in range(x.shape[0])]
yhat = [1 if x > 0 else 0 for x in y_pred]
return np.array(log_loss(y, yhat))
#training the model
epochs = 10
opt = AdamOptimizer(stepsize= 0.01, beta1=0.9, beta2=0.999)
params = initialize_parameters(feature_range[0], feature_range[1], n_parameters)
loss = compute_cost(params, x_train, y_train)
print("Epoch: {:2d} | Cost: {:3f}".format( 0, loss ))
for it in range(epochs):
params, loss = opt.step_and_cost(lambda v: compute_cost(v, x_train, y_train), params)
res = [it + 1, loss]
print("Epoch: {:2d} | Loss: {:3f}".format(*res))