Hello! I am trying to use an sklearn regression inside my cost function, but I get an error when I call clf.fit (clf is the sklearn regressor). I wanted to ask whether this is supposed to be supported. Below is my code:
import pennylane as qml
from pennylane import numpy as np
n_probes = 2
n_ancillas = 1
n_qubits = n_probes + n_ancillas
dev = qml.device("default.qubit", analytic=True, wires=n_probes + n_ancillas)
##############################################################################
# Cost
##############################################################################
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
@qml.qnode(dev)
def measure_full_sys(phi, params):
    qml.RY(params[0], wires=0)
    qml.RZ(phi, wires=0)
    qml.RY(params[1], wires=0)
    return qml.probs(wires=0)
    #return qml.probs(wires=range(n_qubits))
def collect_grid_sample(phi_range, n_sample, params):
    # collect (phi, Pr(0)) pairs on a uniform grid over [0, phi_range)
    phis = np.arange(0, phi_range, phi_range/n_sample)
    #data = np.zeros((n_sample,2))
    res = np.empty(0)
    for i in range(n_sample):
        cur_phi = phis[i]
        cur_zero_prob = measure_full_sys(cur_phi, params)[0].reshape(1)
        #print(cur_zero_prob)
        res = np.concatenate((res, cur_zero_prob))
    data = np.concatenate((phis.reshape(n_sample, 1), res.reshape(-1, 1)), axis=1)
    #print(data)
    return data
def collect_rand_sample(phi_range, n_sample, params):
    # collect (phi, Pr(0)) pairs for phi drawn uniformly at random from [0, phi_range)
    phis = np.random.rand(n_sample) * phi_range
    res = np.empty(0)
    for i in range(n_sample):
        cur_phi = phis[i]
        cur_zero_prob = measure_full_sys(cur_phi, params)[0].reshape(1)
        #print(cur_zero_prob)
        res = np.concatenate((res, cur_zero_prob))
    data = np.concatenate((phis.reshape(n_sample, 1), res.reshape(-1, 1)), axis=1)
    #print(data)
    return data
iter_counter = 0  # iteration counter for the periodic diagnostics below

def mse(params):
    global iter_counter
    # train the regressor
    clf = LinearRegression()
    train_data = collect_grid_sample(0.1, 100, params)
    test_data = collect_rand_sample(0.1, 100, params)
    X_train = train_data[:, 1].reshape(-1, 1)
    y_train = train_data[:, 0]
    X_test = test_data[:, 1].reshape(-1, 1)
    y_test = test_data[:, 0]
    clf.fit(X_train, y_train)  # <-- this is the call that raises the error
    pred = clf.predict(X_test)
    test_mse = mean_squared_error(pred, y_test)
    if iter_counter % 20 == 0:
        print("cur test RMSE: " + str(np.sqrt(test_mse)))
        train_pred = clf.predict(X_train)
        train_mse = mean_squared_error(train_pred, y_train)
        print("cur train RMSE: " + str(np.sqrt(train_mse)))
        print("cur theta values:")
        print(params)
        plt.scatter(X_train, y_train, color='g', s=1)
        plt.scatter(X_test, y_test, color='b', s=1)
        plt.xlabel('Pr(0)')
        plt.ylabel('Phi')
        plt.show()
    iter_counter += 1
    return test_mse
##############################################################################
# Optimization
##############################################################################
steps = 200
params_init = np.random.rand(2)
gd_cost = []
opt = qml.RMSPropOptimizer(0.01)
theta = params_init
for _ in range(steps):
    theta = opt.step(mse, theta)
    gd_cost.append(mse(theta))
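
In case a smaller example is useful, here is a stripped-down sketch of the same pattern (a single-qubit placeholder circuit and a hard-coded phi grid rather than my actual setup); it follows the same structure as the code above, with the sklearn fit happening inside the cost that is passed to the optimizer:

import pennylane as qml
from pennylane import numpy as np
from sklearn.linear_model import LinearRegression

dev_min = qml.device("default.qubit", analytic=True, wires=1)

@qml.qnode(dev_min)
def circuit(phi, params):
    # placeholder circuit with the same structure as measure_full_sys above
    qml.RY(params[0], wires=0)
    qml.RZ(phi, wires=0)
    qml.RY(params[1], wires=0)
    return qml.probs(wires=0)

def cost(params):
    phis = np.arange(0, 0.1, 0.001)
    # Pr(0) for each phi, produced by the QNode
    probs = np.array([circuit(p, params)[0] for p in phis])
    reg = LinearRegression()
    reg.fit(probs.reshape(-1, 1), phis)  # same kind of fit call that fails in my full code
    pred = reg.predict(probs.reshape(-1, 1))
    return np.mean((pred - phis) ** 2)

opt = qml.RMSPropOptimizer(0.01)
params = np.random.rand(2)
params = opt.step(cost, params)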