How to call the opt.step function with two sets of variables


#1

From the tutorial, we know: var = opt.step(lambda v: cost(v, X, Y), var)
My question is: if I have two variables, say var_1 and var_2, and they have different sizes, how do I call the step function?


#2

Hi @cubicgate!

The optimizers provided with PennyLane all work best if the trainable parameters are a single NumPy array.

If this is not the case, there are tricks you can use (such as reshaping, flattening, or concatenating) to make sure that the parameters passed to your cost function form a single flattened array:

import pennylane as qml
from pennylane import numpy as np
from pennylane import expval, var

dev = qml.device("default.qubit", wires=3)

@qml.qnode(dev)
def circuit(var1, var2):
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)

    # layer 1
    qml.RX(var1[0], wires=0)
    qml.RZ(var1[1], wires=1)
    qml.CNOT(wires=(0, 1))

    # layer 2
    qml.RX(var2[0], wires=0)
    qml.RY(var2[1], wires=1)
    qml.RZ(var2[2], wires=2)

    qml.CNOT(wires=(0, 1))
    return expval(qml.PauliY(0)), var(qml.PauliZ(1))

var1 = np.array([0.54, -0.12])
var2 = np.array([-0.6543, 0.123, 1.95])

opt = qml.GradientDescentOptimizer(0.1)

def cost(params):
    """Cost that is minimized when the variance
    var(PauliZ) on wire 1 exceeds the expectation
    <Y> on wire 0 by exactly 2.
    """
    # split the flat parameter vector back into the two sets
    var1 = params[:2]
    var2 = params[2:]
    res = circuit(var1, var2)
    return np.abs(res[0] - res[1] + 2)

params = np.concatenate([var1, var2])

for i in range(100):
    params = opt.step(cost, params)
    print("Cost:", cost(params))

print("Final circuit value:", circuit(params))
print("Final parameters:", params)

#3

Alternatively, you may wish to use the more advanced PyTorch or TF interfaces to the PennyLane QNode. These allow the QNode to be compatible with PyTorch/TensorFlow tensors and optimizers, which are much more flexible than the optimizers provided by PennyLane.

For example, below is the exact same program as my previous post, this time written using PyTorch instead of NumPy:

import torch

import pennylane as qml
from pennylane import expval, var

dev = qml.device("default.qubit", wires=3)

@qml.qnode(dev, interface="torch")
def circuit(var1, var2):
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)

    # layer 1
    qml.RX(var1[0], wires=0)
    qml.RZ(var1[1], wires=1)
    qml.CNOT(wires=(0, 1))

    # layer 2
    qml.RX(var2[0], wires=0)
    qml.RY(var2[1], wires=1)
    qml.RZ(var2[2], wires=2)

    qml.CNOT(wires=(0, 1))
    return expval(qml.PauliY(0)), var(qml.PauliZ(1))

var1 = torch.tensor([0.54, -0.12], requires_grad=True)
var2 = torch.tensor([-0.6543, 0.123, 1.95], requires_grad=True)

def cost(var1, var2):
    """Cost that is minimized when the variance
    var(PauliZ) on wire 1 exceeds the expectation
    <Y> on wire 0 by exactly 2.
    """
    res = circuit(var1, var2)
    return torch.abs(res[0] - res[1] + 2)

opt = torch.optim.Adam([var1, var2], lr=0.1)

for i in range(100):
    opt.zero_grad()
    loss = cost(var1, var2)
    loss.backward()
    opt.step()
    print("Cost:", loss)

# the optimizer updates var1 and var2 in place, so the final values
# can also be read back from its parameter groups
var1_final, var2_final = opt.param_groups[0]['params']

print("Final circuit value:", circuit(var1_final, var2_final))
print("Final parameters:", var1_final, var2_final)

Note that the PyTorch optimizer supports multiple trainable tensors of different sizes, so there is no need to concatenate them.
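
For completeness, here is a rough sketch of the same training loop written against the TF interface mentioned above (untested, assuming TensorFlow 2.x with eager execution; the circuit itself is unchanged):

import tensorflow as tf
import pennylane as qml

dev = qml.device("default.qubit", wires=3)

@qml.qnode(dev, interface="tf")
def circuit(var1, var2):
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)

    # layer 1
    qml.RX(var1[0], wires=0)
    qml.RZ(var1[1], wires=1)
    qml.CNOT(wires=(0, 1))

    # layer 2
    qml.RX(var2[0], wires=0)
    qml.RY(var2[1], wires=1)
    qml.RZ(var2[2], wires=2)

    qml.CNOT(wires=(0, 1))
    return qml.expval(qml.PauliY(0)), qml.var(qml.PauliZ(1))

var1 = tf.Variable([0.54, -0.12], dtype=tf.float64)
var2 = tf.Variable([-0.6543, 0.123, 1.95], dtype=tf.float64)

opt = tf.keras.optimizers.Adam(learning_rate=0.1)

for i in range(100):
    with tf.GradientTape() as tape:
        res = circuit(var1, var2)
        loss = tf.abs(res[0] - res[1] + 2)
    # TensorFlow, like PyTorch, tracks gradients for several
    # differently-sized variables at once
    grads = tape.gradient(loss, [var1, var2])
    opt.apply_gradients(zip(grads, [var1, var2]))

As with PyTorch, the TensorFlow optimizer handles each variable separately, so differently-sized parameter sets need no concatenation.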


#4

Thanks Josh for your code and explanation!