Hi there,
So I am trying to recreate the first half of the following PennyLane demo: Quantum Circuit Born Machines | PennyLane Demos. However, I would like to use PyTorch instead of JAX as used there. Below is the code I have written to attempt this, which mostly follows the tutorial. I have also tried to use Apple silicon (MPS) acceleration but have run into an error when I run on the 'mps' device:
import numpy as np
import torch
import torch.optim as optim
import pennylane as qml

# Check if MPS device is available
device = torch.device("mps") if torch.backends.mps.is_available() else torch.device("cpu")
print(f'Using device: {device}')
class MMD:
    """Squared MMD with a multi-scale Gaussian kernel, precomputed over the sample space."""

    def __init__(self, scales, space):
        gammas = 1 / (2 * (scales**2))
        sq_dists = np.abs(space[:, None] - space[None, :]) ** 2
        self.K = sum(np.exp(-gamma * sq_dists) for gamma in gammas) / len(scales)
        self.K = torch.tensor(self.K, dtype=torch.float64).to(device)
        self.scales = scales

    def k_expval(self, px, py):
        # Kernel expectation value: px^T K py
        return torch.matmul(px, torch.matmul(self.K, py))

    def __call__(self, px, py):
        pxy = px - py
        return self.k_expval(pxy, pxy)
class QCBM:
    """Quantum circuit Born machine: circuit plus MMD loss against a target distribution."""

    def __init__(self, circ, mmd, py):
        self.circ = circ
        self.mmd = mmd
        self.py = torch.tensor(py, dtype=torch.float64).to(device)  # target distribution π(x)

    def mmd_loss(self, params):
        px = self.circ(params)
        return self.mmd(px, self.py), px

    def kl_divergence(self, px):
        # KL(π || p) = Σ_x π(x) log(π(x) / p(x)); nan_to_num handles log(0) cases
        qcbm_probs = px.clone().detach()
        target_probs = self.py
        kl_div = -torch.sum(target_probs * torch.nan_to_num(torch.log(qcbm_probs / target_probs)))
        return kl_div
def get_bars_and_stripes(n):
    bitstrings = [list(np.binary_repr(i, n))[::-1] for i in range(2**n)]
    bitstrings = np.array(bitstrings, dtype=int)

    stripes = bitstrings.copy()
    stripes = np.repeat(stripes, n, 0)
    stripes = stripes.reshape(2**n, n * n)

    bars = bitstrings.copy()
    bars = bars.reshape(2**n * n, 1)
    bars = np.repeat(bars, n, 1)
    bars = bars.reshape(2**n, n * n)

    return np.vstack((stripes[0 : stripes.shape[0] - 1], bars[1 : bars.shape[0]]))
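# (Optional sanity check, a minimal sketch: for n = 3 this should give the
# 2 * 2**3 - 2 = 14 distinct bars-and-stripes patterns, each flattened to 9 pixels.)
print(get_bars_and_stripes(3).shape)  # expected: (14, 9)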
n = 3
n_qubits = n**2
dev = qml.device("default.qubit", wires=n_qubits)
n_layers = 6
wshape = qml.StronglyEntanglingLayers.shape(n_layers=n_layers, n_wires=n_qubits)
weights = np.random.random(size=wshape)
weights = torch.tensor(weights, requires_grad=True, dtype=torch.float64).to(device)
@qml.qnode(dev, interface='torch')
def circuit(weights):
    qml.StronglyEntanglingLayers(
        weights=weights, ranges=[1] * n_layers, wires=range(n_qubits)
    )
    return qml.probs()
data = get_bars_and_stripes(n)
bitstrings = []
nums = []
for d in data:
    bitstrings += ["".join(str(int(i)) for i in d)]
    nums += [int(bitstrings[-1], 2)]
probs = np.zeros(2**n_qubits)
probs[nums] = 1 / len(data)
probs = torch.tensor(probs, dtype=torch.float64).to(device)  # target probabilities as a float64 tensor
bandwidth = np.array([0.25, 0.5, 1])
space = np.arange(2**n_qubits)
mmd = MMD(bandwidth, space)
qcbm = QCBM(circuit, mmd, probs)
optimizer = optim.Adam([weights], lr=0.1)
# Training loop
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    loss, px = qcbm.mmd_loss(weights)
    loss.backward()
    optimizer.step()

    kl_div = qcbm.kl_divergence(px)
    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item()}, KL Divergence: {kl_div.item()}')
This code runs smoothly on CPU; however, I notice that after 100 epochs the KL divergence/loss values are very different from the demo's. I understand there will be some difference due to the seed, for example, but I would not expect an order-of-magnitude difference in results simply from using another framework. If anyone could shed light on why there is a discrepancy, that would be amazing.
For reference, after 100 epochs I get the following result with the code above: Epoch 100/100, Loss: 0.009134388315731927, KL Divergence: 0.38180681247905385, whereas the demo achieves: Step: 90 Loss: 0.0004 KL-div: 0.0755.
Finally, when trying to run on the 'mps' device I get the following error:
TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.
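For completeness, the same TypeError can be reproduced outside my code with a bare float64 tensor (a minimal sketch, assuming a machine where MPS is available), so the limitation appears to be on the PyTorch/MPS side:

torch.ones(1, dtype=torch.float64).to("mps")  # raises the TypeError quoted above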
This is because qml.probs() returns a torch.float64 tensor by default, and this cannot be changed on the user's end.
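As a quick check of this on the CPU path (a minimal sketch, separate from the training script above):

px = circuit(weights)
print(px.dtype)  # torch.float64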
Are there any options in PennyLane to make the default output torch.float32?