When I try to train a circuit with lightning.gpu using diff_method="join", the program reports an error. I am a novice and would appreciate some help.

KeyError Traceback (most recent call last)
Cell In[6], line 5
3 q_weights_flat = np.array([0.5,0.5,0.6,0.7],requires_grad=True)
4 pool_weights_flat = np.array([0.3,0.4,1,1],requires_grad=True)
----> 5 q_net(q_in, c_weights_flat, q_weights_flat, pool_weights_flat)

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane/qnode.py:847, in QNode.__call__(self, *args, **kwargs)
843 self._update_original_device()
845 return res
---> 847 res = qml.execute(
848 [self.tape],
849 device=self.device,
850 gradient_fn=self.gradient_fn,
851 interface=self.interface,
852 gradient_kwargs=self.gradient_kwargs,
853 override_shots=override_shots,
854 **self.execute_kwargs,
855 )
857 if old_interface == "auto":
858 self.interface = "auto"

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane/interfaces/execution.py:724, in execute(tapes, device, gradient_fn, interface, mode, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform)
718 except ImportError as e:
719 raise qml.QuantumFunctionError(
720 f"{mapped_interface} not found. Please install the latest "
721 f"version of {mapped_interface} to enable the ‘{mapped_interface}’ interface."
722 ) from e
---> 724 res = _execute(
725 tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode
726 )
728 return batch_fn(res)

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane/interfaces/autograd.py:81, in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff, mode)
75 # pylint misidentifies autograd.builtins as a dict
76 # pylint: disable=no-member
77 parameters = autograd.builtins.tuple(
78 [autograd.builtins.list(t.get_parameters()) for t in tapes]
79 )
---> 81 return _execute(
82 parameters,
83 tapes=tapes,
84 device=device,
85 execute_fn=execute_fn,
86 gradient_fn=gradient_fn,
87 gradient_kwargs=gradient_kwargs,
88 _n=_n,
89 max_diff=max_diff,
90 )[0]

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/autograd/tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
46 return new_box(ans, trace, node)
47 else:
---> 48 return f_raw(*args, **kwargs)

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane/interfaces/autograd.py:125, in _execute(parameters, tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff)
104 """Autodifferentiable wrapper around Device.batch_execute.
105
106 The signature of this function is designed to work around Autograd restrictions.
(...)
122 understand the consequences!
123 """
124 with qml.tape.Unwrap(*tapes):
---> 125 res, jacs = execute_fn(tapes, **gradient_kwargs)
127 for i, r in enumerate(res):
128 if any(isinstance(m, CountsMP) for m in tapes[i].measurements):

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/contextlib.py:79, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
76 @wraps(func)
77 def inner(*args, **kwds):
78 with self._recreate_cm():
---> 79 return func(*args, **kwds)

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane/_device.py:575, in Device.execute_and_gradients(self, circuits, method, **kwargs)
570 for circuit in circuits:
571 # Evaluations and gradients are paired, so that
572 # devices can re-use the device state for the
573 # gradient computation (if applicable).
574 res.append(self.batch_execute([circuit])[0])
---> 575 jacs.append(gradient_method(circuit, **kwargs))
577 return res, jacs

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane_lightning/lightning_qubit.py:553, in LightningQubit.adjoint_jacobian(self, tape, starting_state, use_device_state)
546 raise QuantumFunctionError(
547 "This method does not support statevector return type. "
548 "Use vjp method instead for this purpose."
549 )
551 self._check_adjdiff_supported_operations(tape.operations)
---> 553 processed_data = self._process_jacobian_tape(tape, starting_state, use_device_state)
555 if not processed_data: # training_params is empty
556 return np.array([], dtype=self._state.dtype)

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane_lightning/lightning_qubit.py:492, in LightningQubit._process_jacobian_tape(self, tape, starting_state, use_device_state)
489 ket = self._pre_rotated_state
491 obs_serialized = _serialize_observables(tape, self.wire_map, use_csingle=self.use_csingle)
---> 492 ops_serialized, use_sp = _serialize_ops(tape, self.wire_map)
494 ops_serialized = create_ops_list(*ops_serialized)
496 # We need to filter out indices in trainable_params which do not
497 # correspond to operators.

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane_lightning/_serialize.py:190, in _serialize_ops(tape, wires_map)
187 mats.append([])
189 wires_list = single_op.wires.tolist()
---> 190 wires.append([wires_map[w] for w in wires_list])
192 inverses = [False] * len(names)
193 return (names, params, wires, inverses, mats), uses_stateprep

File ~/anaconda3/envs/env_pennylane_gpu/lib/python3.10/site-packages/pennylane_lightning/_serialize.py:190, in <listcomp>(.0)
187 mats.append([])
189 wires_list = single_op.wires.tolist()
---> 190 wires.append([wires_map[w] for w in wires_list])
192 inverses = [False] * len(names)
193 return (names, params, wires, inverses, mats), uses_stateprep

KeyError: tensor(1, requires_grad=True)

Hi @Waldeinsamkeit99, welcome to our community!

It’s great that you’re learning!

diff_method specifies the differentiation method that is to be used by PennyLane. You can learn more about this in the Gradients section of our docs. Notice that only specific options are available, and “join” is not one of the options.

I would recommend that you start by not specifying the diff_method argument. If you don't specify it, PennyLane will choose the best one available!
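For example, here is a minimal sketch of how the argument is passed (a toy two-qubit circuit, not your actual q_net). It uses lightning.qubit so it runs without a GPU; you can swap in lightning.gpu if you have it installed:

```python
import pennylane as qml

dev = qml.device("lightning.qubit", wires=2)

# Explicitly request a supported method such as "adjoint" or "parameter-shift".
@qml.qnode(dev, diff_method="adjoint")
def circuit(x):
    qml.RX(x, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))

# Or leave diff_method out entirely and PennyLane will pick the best method
# available for the device.
@qml.qnode(dev)
def circuit_default(x):
    qml.RX(x, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))
```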

Also, if you run into more trouble, make sure to post a minimal non-working example here. This means creating the smallest self-contained piece of code (a minimal version of your code) that someone else can run and that reproduces your problem.
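For a problem like yours, a minimal example could be structured roughly like this (the device, circuit, and parameter shapes below are placeholders, not taken from your code). Including the output of qml.about() is also very helpful, since it tells us which versions you are running:

```python
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("lightning.qubit", wires=2)  # or whichever device shows the error

@qml.qnode(dev)
def q_net(inputs, weights):
    qml.RX(inputs[0], wires=0)
    qml.RY(weights[0], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0))

inputs = np.array([0.1, 0.2], requires_grad=False)
weights = np.array([0.3, 0.4], requires_grad=True)
print(q_net(inputs, weights))  # the single call that triggers the error

qml.about()  # prints PennyLane and device version information
```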

If you have any questions about this please let me know.