Hello! I am trying to train a model to classify two classes using the Autograd interface, but I run into this error: "ValueError: Autograd does not support differentiation of ints." I don't know where I am using integers, because my cost function returns an array with requires_grad=True.
Also, I am using a single observable for both classes in the data.
Can anybody help me, please?
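From the traceback it looks like the check fires on the dtype of any argument marked as trainable, so here is the small sketch I used to inspect what opt.step receives (the names refer to the variables defined in the code below):

# sketch: print dtype and trainability of every argument passed to opt.step
for name, arr in [("params", params), ("X_train", X_train), ("y_train", y_train)]:
    print(name, arr.dtype, arr.requires_grad)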
Code:
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer, GradientDescentOptimizer
import matplotlib.pyplot as plt
from matplotlib import colors
def Data_generate(samples, center=[[-1.0, 1.0], [1.0, -1.0]], radius=1.1):
    """
    Generates a dataset of points with 1/0 labels.

    Args:
        samples (int): number of samples to generate
        center (list[list[float]]): centers of the two circles
        radius (float): radius of the circles

    Returns:
        Xvals (array[tuple]): coordinates of points
        yvals (array[int]): classification labels
    """
    Xvals, yvals = [], []
    for i in range(samples):
        x = 2 * np.random.rand(2) - 1
        y = 0
        if np.linalg.norm(x - center[0]) < radius or np.linalg.norm(x - center[1]) < radius:
            y = 1
        Xvals.append(x)
        yvals.append(y)
    return np.array(Xvals, requires_grad=False), np.array(yvals, requires_grad=True)
def plot_data(x, y, fig=None, ax=None):
    """
    Plot data with red/blue values for a binary classification.
    """
    if fig is None:
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    ax.scatter(x[:, 0], x[:, 1], c=y, cmap=colors.ListedColormap(["red", "blue"]), s=20, edgecolor="k")
    ax.set_xlabel("$x_1$")
    ax.set_ylabel("$x_2$")
Xdata, ydata = Data_generate(1000)
#fig, ax = plt.subplots(1, 1, figsize=(4, 4))
#plot_data(Xdata, ydata, fig=fig, ax=ax)
#plt.show()
# Put everything together.
dev = qml.device("default.qubit", wires=2)
# the observables now have to be invariant under SWAP and X gates
label_0 = [[1], [1],[1], [1]]
label_1 = [[1], [-1],[-1], [1]]
state_labels = np.array([label_0, label_1], requires_grad=False)/2
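# Sanity check (a sketch, not part of the training code): both label states
# should be unchanged by SWAP and by X applied to both wires; an X on a single
# wire at most flips the global sign, which the density matrix ignores.
_SWAP = qml.SWAP(wires=[0, 1]).matrix()
_XX = np.kron(qml.PauliX(0).matrix(), qml.PauliX(1).matrix())
for _s in state_labels:
    assert np.allclose(_SWAP @ _s, _s)
    assert np.allclose(_XX @ _s, _s)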
@qml.qnode(dev, interface="autograd")
def qcircuit(params, x, y):
    # prepare the initial state in an invariant state
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)
    for p in params:
        # embedding
        qml.RZ(x[0], wires=0)
        qml.RZ(x[1], wires=1)
        # since exp(X_1 + X_2) = RX_1 \otimes RX_2, apply RX with a shared angle
        qml.RX(p[0], wires=0)
        qml.RX(p[0], wires=1)
        # RZZ gate decomposed as CNOT, RZ, CNOT
        qml.CNOT([0, 1])
        qml.RZ(p[1], wires=1)
        qml.CNOT([0, 1])
    return qml.expval(qml.Hermitian(y, wires=[0, 1]))
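# Quick numerical check (a sketch, not needed for training) of the identity in
# the comment above: exp(-1j * theta/2 * (X(x)I + I(x)X)) = RX(theta) (x) RX(theta),
# which holds because X(x)I and I(x)X commute.
from scipy.linalg import expm
_theta = 0.7  # arbitrary test angle
_X = qml.PauliX(0).matrix()
_lhs = expm(-0.5j * _theta * (np.kron(_X, np.eye(2)) + np.kron(np.eye(2), _X)))
_rhs = np.kron(qml.RX(_theta, wires=0).matrix(), qml.RX(_theta, wires=1).matrix())
assert np.allclose(_lhs, _rhs)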
def density_matrix(state):
    # outer product |psi><psi| of a column state vector
    return state * np.conj(state).T
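# Note: each s in state_labels has shape (4, 1), so the broadcasted product
# (4, 1) * (1, 4) above produces the expected (4, 4) density matrix, e.g.:
# density_matrix(state_labels[0]).shape == (4, 4)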
def cost(params, x, y, state_labels=None):
    # compute the prediction for each input in the data batch
    loss = 0.0
    dm_labels = [density_matrix(s) for s in state_labels]
    for i in range(len(x)):
        ypred = qcircuit(params, x[i], dm_labels[0])
        loss = loss - y[i] * np.log(ypred, requires_grad=True) - (1 - y[i]) * np.log(1 - ypred, requires_grad=True)
    return np.array(loss / len(x), requires_grad=True)

def test(params, x, y, state_labels=None):
    fidelity_values = []
    dm_labels = [density_matrix(s) for s in state_labels]
    predicted = []
    for i in range(len(x)):
        # fidel_function = lambda y: qcircuit(params, x[i], y)
        fidelities = qcircuit(params, x[i], dm_labels[0])
        best_fidel = np.rint(fidelities)
        predicted.append(best_fidel)
        fidelity_values.append(fidelities)
    return np.array(predicted), np.array(fidelity_values)

def accuracy_score(y_true, y_pred):
    score = y_true == y_pred
    return score.sum() / len(y_true)

def iterate_minibatches(inputs, targets, batch_size):
    for start_idx in range(0, inputs.shape[0] - batch_size + 1, batch_size):
        idxs = slice(start_idx, start_idx + batch_size)
        yield inputs[idxs], targets[idxs]
num_training = 200
num_test = 2000
X_train, y_train = Data_generate(num_training)
X_test, y_test = Data_generate(num_test)
# Train using the Adam optimizer and evaluate the classifier
num_layers = 5
learning_rate = 0.1
epochs = 20
batch_size = 32
# initialize random weights
params = np.random.uniform(size=(num_layers, 2), requires_grad=True)
# best_params = np.load('../data/best_params_discrete.npy')
# params = best_params
opt = AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999)
predicted_train, fidel_train = test(params, X_train, y_train, state_labels)
accuracy_train = accuracy_score(y_train, predicted_train)
predicted_test, fidel_test = test(params, X_test, y_test, state_labels)
accuracy_test = accuracy_score(y_test, predicted_test)
loss = cost(params, X_test, y_test, state_labels)
print(
    "Epoch: {:2d} | Cost: {:3f} | Train accuracy: {:3f} | Test accuracy: {:3f}".format(
        0, loss, accuracy_train, accuracy_test
    )
)
best_val_accuracy = accuracy_test
best_params = params.copy()
for it in range(epochs):
    for Xbatch, ybatch in iterate_minibatches(X_train, y_train, batch_size=batch_size):
        params, _, _, _ = opt.step(cost, params, Xbatch, ybatch, state_labels)

    predicted_train, fidel_train = test(params, X_train, y_train, state_labels)
    accuracy_train = accuracy_score(y_train, predicted_train)
    loss = cost(params, X_train, y_train, state_labels)

    predicted_test, fidel_test = test(params, X_test, y_test, state_labels)
    accuracy_test = accuracy_score(y_test, predicted_test)

    if accuracy_test > best_val_accuracy:
        best_params = params.copy()
        best_val_accuracy = accuracy_test

    res = [it + 1, loss, accuracy_train, accuracy_test]
    print(
        "Epoch: {:2d} | Loss: {:3f} | Train accuracy: {:3f} | Test accuracy: {:3f}".format(*res)
    )
Error:
---------------------------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-9ec4870e87ae> in <cell line: 161>()
161 for it in range(epochs):
162 for Xbatch, ybatch in iterate_minibatches(X_train, y_train, batch_size=batch_size):
--> 163 params, _, _, _ = opt.step(cost, params, Xbatch, ybatch, state_labels)
164
165 predicted_train, fidel_train = test(params, X_train, y_train, state_labels)
3 frames
/usr/local/lib/python3.10/dist-packages/pennylane/optimize/gradient_descent.py in step(self, objective_fn, grad_fn, *args, **kwargs)
91 """
92
---> 93 g, _ = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
94 new_args = self.apply_grad(g, args)
95
/usr/local/lib/python3.10/dist-packages/pennylane/optimize/gradient_descent.py in compute_grad(objective_fn, args, kwargs, grad_fn)
120 """
121 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 122 grad = g(*args, **kwargs)
123 forward = getattr(g, "forward", None)
124
/usr/local/lib/python3.10/dist-packages/pennylane/_grad.py in __call__(self, *args, **kwargs)
152 """Evaluates the gradient function, and saves the function value
153 calculated during the forward pass in :attr:`.forward`."""
--> 154 grad_fn, argnum = self._get_grad_fn(args)
155
156 if not isinstance(argnum, int) and not argnum:
/usr/local/lib/python3.10/dist-packages/pennylane/_grad.py in _get_grad_fn(self, args)
139 if trainable:
140 if arg.dtype.name[:3] == "int":
--> 141 raise ValueError("Autograd does not support differentiation of ints.")
142 argnum.append(idx)
143
ValueError: Autograd does not support differentiation of ints.
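For what it is worth, I can trigger the same ValueError without any circuit at all. This minimal sketch (independent of my model) already raises it whenever an integer array is marked trainable:

import pennylane as qml
from pennylane import numpy as np

labels = np.array([0, 1], requires_grad=True)  # integer dtype, marked trainable
qml.grad(lambda y: np.sum(y ** 2))(labels)  # raises the same ValueError for me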
About Version:
Name: PennyLane
Version: 0.35.1
Summary: PennyLane is a cross-platform Python library for quantum computing, quantum machine learning, and quantum chemistry. Train a quantum computer the same way as a neural network.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /usr/local/lib/python3.10/dist-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane_Lightning
Platform info: Linux-6.1.58+-x86_64-with-glibc2.35
Python version: 3.10.12
Numpy version: 1.25.2
Scipy version: 1.11.4
Installed devices:
- lightning.qubit (PennyLane_Lightning-0.35.1)
- default.clifford (PennyLane-0.35.1)
- default.gaussian (PennyLane-0.35.1)
- default.mixed (PennyLane-0.35.1)
- default.qubit (PennyLane-0.35.1)
- default.qubit.autograd (PennyLane-0.35.1)
- default.qubit.jax (PennyLane-0.35.1)
- default.qubit.legacy (PennyLane-0.35.1)
- default.qubit.tf (PennyLane-0.35.1)
- default.qubit.torch (PennyLane-0.35.1)
- default.qutrit (PennyLane-0.35.1)
- null.qubit (PennyLane-0.35.1)