PennyLane and PyTorch running on GPU

Hi,

I tried the following code, but it raises an error. Could anyone help?

import numpy as np
import pennylane as qml
import torch
import torch.nn as nn


class QuantumLayer(nn.Module):
    def __init__(self, n_qubits=7, include_view=True):
        super().__init__()
        self.n_qubits = n_qubits
        self.sim_dev = qml.device('default.qubit', wires=n_qubits)
        self.include_view = include_view
        self.weight1 = nn.Parameter(torch.randn(4, 3))
        self.weight2 = nn.Parameter(torch.randn(3, 3))
        self.weight3 = nn.Parameter(torch.randn(4, 3))

    def circular_entanglement_layer(self, entangling_qubits):
        # Circular entanglement
        if len(entangling_qubits) > self.n_qubits:
            raise Exception('Num of entangling qubits must be smaller than num of available qubits')
        n = len(entangling_qubits)
        for i in range(n):
            qml.CNOT(wires=[entangling_qubits[i], entangling_qubits[(i + 1) % n]])

    def rotation_embedding(self, x, encoding_qubits):
        # Perform rotation embedding
        # x: (batch_size, 3 + 3)
        if np.shape(x)[-1] != len(encoding_qubits):
            raise Exception('lengths of data to be encoded and encoding qubits must be equal')
        qml.AngleEmbedding(x, wires=encoding_qubits)

    def variation_layer(self, params, var_qubits):
        if len(params) != len(var_qubits):
            raise Exception('lengths must be equal in variational layers')
        for i in range(len(params)):
            qml.Rot(params[i, 0], params[i, 1], params[i, 2], wires=var_qubits[i])

    def QNode(self, inputs, weights1, weights2, weights3):

        @qml.qnode(self.sim_dev, interface='torch')
        def qnode(inputs, weights1, weights2, weights3):
            # inputs: (batch_size, 3 + 3), weights: (4, 3)
            self.rotation_embedding(inputs, list(range(3)) + list(range(3, 6)))
            self.variation_layer(weights1, list(range(4)))
            self.circular_entanglement_layer(list(range(4)))
            self.variation_layer(weights3, list(range(4)))
            self.circular_entanglement_layer(list(range(4)))

            if self.include_view:
                qml.CNOT(wires=[4, 0])
                qml.CNOT(wires=[4, 1])
                qml.CNOT(wires=[4, 2])
                qml.CNOT(wires=[5, 0])
                qml.CNOT(wires=[5, 1])
                qml.CNOT(wires=[5, 2])
                qml.CNOT(wires=[6, 0])
                qml.CNOT(wires=[6, 1])
                qml.CNOT(wires=[6, 2])
                self.variation_layer(weights2, list(range(3)))

            return [qml.expval(qml.PauliZ(i)) for i in range(4)]

        return qnode(inputs, weights1, weights2, weights3)
    
    def forward(self, chunk_pos, chunk_view):
        # chunk_pos: (batch_size, 3), chunk_view: (batch_size, 3)
        inputs = torch.cat((chunk_pos, chunk_view), dim=-1)

        q_out = []
        for elem in inputs:
            # The QNode returns one expectation value per measured wire;
            # stack them into a single tensor per sample.
            q_out_elem = torch.stack(self.QNode(elem, self.weight1, self.weight2, self.weight3)).float()
            q_out.append(q_out_elem)
        return torch.stack(q_out)
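
For context, this is roughly how I exercise the layer. The driver below is a minimal, hypothetical sketch (the batch size of 8 and the `.to('cuda')` call reflect my setup and are not part of the class above):

# Hypothetical driver: moving the module to the GPU puts weight1/weight2/weight3
# on cuda:0, while default.qubit simulates the circuit with CPU tensors.
layer = QuantumLayer(n_qubits=7, include_view=True).to('cuda')
chunk_pos = torch.randn(8, 3, device='cuda')   # (batch_size, 3)
chunk_view = torch.randn(8, 3, device='cuda')  # (batch_size, 3)
out = layer(chunk_pos, chunk_view)             # raises the RuntimeError below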

But I get the following traceback:

/usr/local/lib/python3.10/dist-packages/pennylane/devices/default_qubit.py in <genexpr>(.0)
    473     if max_workers is None:
    474         results = tuple(
--> 475             simulate(
    476                 c,
    477                 rng=self._rng,

/usr/local/lib/python3.10/dist-packages/pennylane/devices/qubit/simulate.py in simulate(circuit, rng, prng_key, debugger, interface)
    267
    268     """
--> 269     state, is_state_batched = get_final_state(circuit, debugger=debugger, interface=interface)
    270     return measure_final_state(circuit, state, is_state_batched, rng=rng, prng_key=prng_key)

/usr/local/lib/python3.10/dist-packages/pennylane/devices/qubit/simulate.py in get_final_state(circuit, debugger, interface)
    159     is_state_batched = bool(prep and prep.batch_size is not None)
    160     for op in circuit.operations[bool(prep) :]:
--> 161         state = apply_operation(op, state, is_state_batched=is_state_batched, debugger=debugger)
    162
    163     # Handle postselection on mid-circuit measurements

/usr/lib/python3.10/functools.py in wrapper(*args, **kw)
    887                             '1 positional argument')
    888
--> 889         return dispatch(args[0].__class__)(*args, **kw)
    890
    891     funcname = getattr(func, '__name__', 'singledispatch function')

/usr/local/lib/python3.10/dist-packages/pennylane/devices/qubit/apply_operation.py in apply_operation(op, state, is_state_batched, debugger)
    196
    197     """
--> 198     return _apply_operation_default(op, state, is_state_batched, debugger)
    199
    200

/usr/local/lib/python3.10/dist-packages/pennylane/devices/qubit/apply_operation.py in _apply_operation_default(op, state, is_state_batched, debugger)
    206         and math.ndim(state) < EINSUM_STATE_WIRECOUNT_PERF_THRESHOLD
    207     ) or (op.batch_size and is_state_batched):
--> 208         return apply_operation_einsum(op, state, is_state_batched=is_state_batched)
    209     return apply_operation_tensordot(op, state, is_state_batched=is_state_batched)
    210

/usr/local/lib/python3.10/dist-packages/pennylane/devices/qubit/apply_operation.py in apply_operation_einsum(op, state, is_state_batched)
     98     reshaped_mat = math.reshape(mat, new_mat_shape)
     99
--> 100     return math.einsum(einsum_indices, reshaped_mat, state)
    101
    102

/usr/local/lib/python3.10/dist-packages/pennylane/math/multi_dispatch.py in einsum(indices, like, optimize, *operands)
    537     if like is None:
    538         like = get_interface(*operands)
--> 539     operands = np.coerce(operands, like=like)
    540     if optimize is None or like == "torch":
    541         # torch einsum doesn't support the optimize keyword argument

/usr/local/lib/python3.10/dist-packages/autoray/autoray.py in do(fn, like, *args, **kwargs)
     78     """
     79     backend = choose_backend(fn, *args, like=like, **kwargs)
--> 80     return get_lib_fn(backend, fn)(*args, **kwargs)
     81
     82

/usr/local/lib/python3.10/dist-packages/pennylane/math/single_dispatch.py in _coerce_types_torch(tensors)
    603         # GPU specific case
    604         device_names = ", ".join(str(d) for d in device_set)
--> 605         raise RuntimeError(
    606             f"Expected all tensors to be on the same device, but found at least two devices, {device_names}!"
    607         )

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0, cpu!

Does anyone know how to fix this? From the traceback, my guess is that `.to('cuda')` puts the `nn.Parameter` weights on cuda:0 while `default.qubit` builds its state on the CPU, so the einsum inside PennyLane sees tensors on two different devices.
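
One workaround I'm considering is to keep the quantum part entirely on the CPU and move tensors across at the module boundary. Here is a rough, untested sketch of the idea (the `.cpu()`/`.to(device)` placement is my own assumption, not something I found in the PennyLane docs):

    def forward(self, chunk_pos, chunk_view):
        # Untested sketch: feed the simulator CPU tensors only, then return
        # the result on whatever device the inputs originally lived on.
        device = chunk_pos.device
        inputs = torch.cat((chunk_pos, chunk_view), dim=-1).cpu()
        w1, w2, w3 = self.weight1.cpu(), self.weight2.cpu(), self.weight3.cpu()
        q_out = [torch.stack(self.QNode(elem, w1, w2, w3)).float() for elem in inputs]
        return torch.stack(q_out).to(device)

Is that a reasonable approach, or is there a proper way to run a PennyLane QNode with tensors that live on the GPU?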