Hello all,
So, I have built a 2-qubit variational classifier as in the demo. It runs very well with the Iris dataset, but when I try to import a different dataset from scikit-learn, I get the following errors even though the dataset has exactly the same format:
TypeError: float() argument must be a string or a number, not 'ArrayBox'
ValueError: setting an array element with a sequence.
The code I used to generate the dataset is the following:
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import minmax_scale
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
from pandas import DataFrame

# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2)
# scatter plot, dots colored by class value
df = DataFrame(dict(x=X[:,0], y=X[:,1], label=y))
y[y == 0] = -1  # map zeroes to -1
# scale features into [0, pi] and split the data
Xnorm = minmax_scale(X, feature_range=(0, np.pi))
X_train_val, X_test, y_train_val, y_test = train_test_split(Xnorm, y, test_size=0.5)
Any ideas on how to solve this? Thanks in advance!!
Complete code in the next comment.
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale
from sklearn.datasets import make_blobs
from matplotlib import pyplot
from pandas import DataFrame
import pennylane as qml
from pennylane.templates.embeddings import AngleEmbedding
from pennylane.templates.layers import StronglyEntanglingLayers
from pennylane.init import strong_ent_layers_uniform
from pennylane.optimize import GradientDescentOptimizer
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2)
y[y == 0] = -1  # map zeroes to -1, as above
# scaling and splitting dataset
Xnorm = minmax_scale(X, feature_range=(0, np.pi))
X_train_val, X_test, y_train_val, y_test = train_test_split(Xnorm, y, test_size=0.5)
# number of qubits is equal to the number of features
n_qubits = X.shape[1]  # = 2 for this dataset
# quantum device handle
dev = qml.device("default.qubit", wires=n_qubits)
# quantum circuit
@qml.qnode(dev)
def circuit(weights, x=None):
    AngleEmbedding(x, wires=range(n_qubits))
    StronglyEntanglingLayers(weights, wires=range(n_qubits))
    return qml.expval(qml.PauliZ(0))
# variational quantum classifier
def variational_classifier(theta, x=None):
    weights = theta[0]
    bias = theta[1]
    return circuit(weights, x=x) + bias

def cost(theta, X, expectations):
    e_predicted = \
        np.array([variational_classifier(theta, x=x) for x in X])
    loss = np.mean((e_predicted - expectations)**2)
    return loss
# number of quantum layers
n_layers = 3
# split into train and validation
X_train, X_validation, y_train, y_validation = \
    train_test_split(X_train_val, y_train_val, test_size=0.50)
# convert classes to target expectations (labels are already -1/+1)
e_train = np.empty_like(y_train)
e_train[y_train == -1] = -1
e_train[y_train == 1] = +1
# select learning batch size
batch_size = 5
# calculate number of batches
batches = len(X_train) // batch_size
# select number of epochs
n_epochs = 10
# draw random quantum node weights
theta_weights = strong_ent_layers_uniform(n_layers, n_qubits, seed=42)
theta_bias = 0.0
theta_init = (theta_weights, theta_bias) # initial weights
theta = theta_init
# start of main learning loop
# build the optimizer object
pennylane_opt = GradientDescentOptimizer()
# split training data into batches
X_batches = np.array_split(np.arange(len(X_train)), batches)
for it, batch_index in enumerate(chain(*(n_epochs * [X_batches]))):
    # update the weights by one optimizer step
    batch_cost = \
        lambda theta: cost(theta, X_train[batch_index], e_train[batch_index])
    theta = pennylane_opt.step(batch_cost, theta)
    print("Iter: {:5d}".format(it + 1))
# end of learning loop
Hi @NikSchet,
In PennyLane, internally we are using a custom ndarray class (called tensor) which subclasses numpy's ndarray. It is useful because differentiable parameters can be tracked easily (note the requires_grad attribute).
In order to use the custom tensor class internally, numpy has to be imported from PennyLane: from pennylane import numpy as np.
This should help out with your case; it was also a subtle difference between your code and the demo. With vanilla numpy, calls such as np.array(...) try to coerce the ArrayBox objects that autograd creates during differentiation into plain floats, which raises exactly the TypeError you saw.
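As a quick illustration (this snippet is mine, not from the demo, and the variable a is just a placeholder), the wrapped import hands back trainable tensor objects:

from pennylane import numpy as np  # PennyLane's wrapped NumPy, not vanilla numpy

a = np.array([0.1, 0.2], requires_grad=True)  # a pennylane.numpy tensor
print(type(a))          # <class 'pennylane.numpy.tensor.tensor'>
print(a.requires_grad)  # True

Swapping import numpy as np for the PennyLane import at the top of your script should be the only change needed.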
Thank you very much, that fixed the problem.