AttributeError: 'Adjoint' object has no attribute 'supports_heisenberg'

import pennylane as qml


def DataEncoding(data, wires_start, wires_end):
    # Continuous-variable encoding: squeezing, Kerr, rotation, and a beamsplitter
    qml.Squeezing(data[0], data[1], wires=wires_start)
    qml.Squeezing(data[2], data[3], wires=wires_end)
    qml.Kerr(data[4], wires=wires_start)
    qml.Kerr(data[5], wires=wires_end)
    qml.Rotation(data[6], wires=wires_start)
    qml.Rotation(data[7], wires=wires_end)
    qml.Beamsplitter(data[8], data[9], wires=[wires_start, wires_end])

I apply the adjoint of the encoding with the following code:

    qml.adjoint(DataEncoding)(x2, 0, 1)
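
For context, the encoding and its adjoint sit inside a QNode on a CV device, roughly like the sketch below. The device name, trainable layer, and measurement here are simplified placeholders standing in for my actual variational_classifier, not the full code:

import pennylane as qml

# Placeholder CV device (here via the PennyLane-SF plugin); my real device may differ.
dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=10)

@qml.qnode(dev)
def circuit(weights, x):
    DataEncoding(x, 0, 1)                # encode the 10 features
    qml.Rotation(weights[0], wires=0)    # placeholder trainable layer
    qml.Rotation(weights[1], wires=1)
    qml.adjoint(DataEncoding)(x, 0, 1)   # adjoint of the encoding -- differentiating this raises the AttributeError
    return qml.expval(qml.NumberOperator(0))

Running the optimizer over a cost built from a circuit like this produces the traceback below.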
--------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[9], line 1
----> 1 his_acc_val, his_acc_train, his_cost = TrainAdam(params_init)
      2 print(his_acc_val, his_acc_train, his_cost)

Cell In[8], line 13, in TrainAdam(params_init)
     11 feats_train_batch = feats_train[batch_index]
     12 Y_train_batch = Y_train[batch_index]
---> 13 (weights, _, _), _cost = opt.step_and_cost(cost, weights, feats_train_batch, Y_train_batch)
     14 predictions_train = [np.sign(variational_classifier(weights, feat)) for feat in feats_train]
     15 predictions_val = [np.sign(variational_classifier(weights, feat)) for feat in feats_val]

File d:\miniconda3\lib\site-packages\pennylane\optimize\gradient_descent.py:59, in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
     39 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
     40     """Update trainable arguments with one step of the optimizer and return the corresponding
     41     objective function value prior to the step.
     42 
   (...)
     56         If single arg is provided, list [array] is replaced by array.
     57     """
---> 59     g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     60     new_args = self.apply_grad(g, args)
     62     if forward is None:

File d:\miniconda3\lib\site-packages\pennylane\optimize\gradient_descent.py:117, in GradientDescentOptimizer.compute_grad(objective_fn, args, kwargs, grad_fn)
     99 r"""Compute gradient of the objective function at the given point and return it along with
    100 the objective function forward pass (if available).
    101 
   (...)
    114     will not be evaluted and instead ``None`` will be returned.
    115 """
    116 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 117 grad = g(*args, **kwargs)
    118 forward = getattr(g, "forward", None)
    120 num_trainable_args = sum(getattr(arg, "requires_grad", False) for arg in args)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:115, in grad.__call__(self, *args, **kwargs)
    112     self._forward = self._fun(*args, **kwargs)
    113     return ()
--> 115 grad_value, ans = grad_fn(*args, **kwargs)
    116 self._forward = ans
    118 return grad_value

File d:\miniconda3\lib\site-packages\autograd\wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:141, in grad._grad_with_forward(fun, x)
    135 if not vspace(ans).size == 1:
    136     raise TypeError(
    137         "Grad only applies to real scalar-output functions. "
    138         "Try jacobian, elementwise_grad or holomorphic_grad."
    139     )
--> 141 grad_value = vjp(vspace(ans).ones())
    142 return grad_value, ans

File d:\miniconda3\lib\site-packages\autograd\core.py:14, in make_vjp.<locals>.vjp(g)
---> 14 def vjp(g): return backward_pass(g, end_node)

File d:\miniconda3\lib\site-packages\autograd\core.py:21, in backward_pass(g, end_node)
     19 for node in toposort(end_node):
     20     outgrad = outgrads.pop(node)
---> 21     ingrads = node.vjp(outgrad[0])
     22     for parent, ingrad in zip(node.parents, ingrads):
     23         outgrads[parent] = add_outgrads(outgrads.get(parent), ingrad)

File d:\miniconda3\lib\site-packages\autograd\core.py:67, in defvjp.<locals>.vjp_argnums.<locals>.<lambda>(g)
     64         raise NotImplementedError(
     65             "VJP of {} wrt argnum 0 not defined".format(fun.__name__))
     66     vjp = vjpfun(ans, *args, **kwargs)
---> 67     return lambda g: (vjp(g),)
     68 elif L == 2:
     69     argnum_0, argnum_1 = argnums

File d:\miniconda3\lib\site-packages\pennylane\interfaces\autograd.py:228, in vjp.<locals>.grad_fn(dy)
    226 if _n == max_diff:
    227     with qml.tape.Unwrap(*tapes):
--> 228         vjp_tapes, processing_fn = qml.gradients.batch_vjp(
    229             tapes,
    230             dy,
    231             gradient_fn,
    232             reduction="append",
    233             gradient_kwargs=gradient_kwargs,
    234         )
    236         vjps = processing_fn(execute_fn(vjp_tapes)[0])
    238 else:

File d:\miniconda3\lib\site-packages\pennylane\gradients\vjp.py:525, in batch_vjp(tapes, dys, gradient_fn, shots, reduction, gradient_kwargs)
    523 # Loop through the tapes and dys vector
    524 for tape, dy in zip(tapes, dys):
--> 525     g_tapes, fn = vjp(tape, dy, gradient_fn, shots=shots, gradient_kwargs=gradient_kwargs)
    526     reshape_info.append(len(g_tapes))
    527     processing_fns.append(fn)

File d:\miniconda3\lib\site-packages\pennylane\gradients\vjp.py:381, in vjp(tape, dy, gradient_fn, shots, gradient_kwargs)
    378 except (AttributeError, TypeError, NotImplementedError):
    379     pass
--> 381 gradient_tapes, fn = gradient_fn(tape, shots=shots, **gradient_kwargs)
    383 def processing_fn(results, num=None):
    384     # postprocess results to compute the Jacobian
    385     jac = fn(results)

File d:\miniconda3\lib\site-packages\pennylane\transforms\batch_transform.py:340, in batch_transform.__call__(self, *targs, **tkwargs)
    335     return self._device_wrapper(*targs, **tkwargs)(qnode)
    337 if isinstance(qnode, qml.tape.QuantumScript):
    338     # Input is a quantum tape.
    339     # tapes, fn = some_transform(tape, *transform_args)
--> 340     return self._tape_wrapper(*targs, **tkwargs)(qnode)
    342 if isinstance(qnode, (qml.QNode, qml.ExpvalCost)):
    343     # Input is a QNode:
    344     # result = some_transform(qnode, *transform_args)(*qnode_args)
    345     wrapper = self.qnode_wrapper(qnode, targs, tkwargs)

File d:\miniconda3\lib\site-packages\pennylane\transforms\batch_transform.py:430, in batch_transform._tape_wrapper.<locals>.<lambda>(tape)
    429 def _tape_wrapper(self, *targs, **tkwargs):
--> 430     return lambda tape: self.construct(tape, *targs, **tkwargs)

File d:\miniconda3\lib\site-packages\pennylane\transforms\batch_transform.py:412, in batch_transform.construct(self, tape, *args, **kwargs)
    409 if expand and self.expand_fn is not None:
    410     tape = self.expand_fn(tape, *args, **kwargs)
--> 412 tapes, processing_fn = self.transform_fn(tape, *args, **kwargs)
    414 if processing_fn is None:
    416     def processing_fn(x):

File d:\miniconda3\lib\site-packages\pennylane\gradients\parameter_shift_cv.py:661, in param_shift_cv(tape, dev, argnum, shifts, gradient_recipes, fallback_fn, f0, force_order2, shots)
    656 if any(isinstance(m, StateMP) for m in tape.measurements):
    657     raise ValueError(
    658         "Computing the gradient of circuits that return the state is not supported."
    659     )
--> 661 _gradient_analysis_cv(tape)
    663 if argnum is None and not tape.trainable_params:
    664     warnings.warn(
    665         "Attempted to compute the gradient of a tape with no trainable parameters. "
    666         "If this is unintended, please mark trainable parameters in accordance with the "
    667         "chosen auto differentiation framework, or via the 'tape.trainable_params' property."
    668     )

File d:\miniconda3\lib\site-packages\pennylane\gradients\parameter_shift_cv.py:128, in _gradient_analysis_cv(tape)
    125 tape._gradient_fn = param_shift_cv
    127 for idx, info in enumerate(tape._par_info):
--> 128     info["grad_method"] = _grad_method(tape, idx)

File d:\miniconda3\lib\site-packages\pennylane\gradients\parameter_shift_cv.py:81, in _grad_method(tape, idx)
     77 # For parameter-shift compatible CV gates, we need to check both the
     78 # intervening gates, and the type of the observable.
     79 best_method = "A"
---> 81 if any(not k.supports_heisenberg for k in ops_between):
     82     # non-Gaussian operators present in-between the operation
     83     # and the observable. Must fallback to numeric differentiation.
     84     best_method = "F"
     86 elif m.obs.ev_order == 2:

File d:\miniconda3\lib\site-packages\pennylane\gradients\parameter_shift_cv.py:81, in <genexpr>(.0)
     77 # For parameter-shift compatible CV gates, we need to check both the
     78 # intervening gates, and the type of the observable.
     79 best_method = "A"
---> 81 if any(not k.supports_heisenberg for k in ops_between):
     82     # non-Gaussian operators present in-between the operation
     83     # and the observable. Must fallback to numeric differentiation.
     84     best_method = "F"
     86 elif m.obs.ev_order == 2:

AttributeError: 'Adjoint' object has no attribute 'supports_heisenberg'

Hey @RX1,

Your code example is incomplete, so I’m not sure where the error is stemming from. Based on your traceback, it’s coming from here:

File d:\miniconda3\lib\site-packages\pennylane\gradients\parameter_shift_cv.py:81, in _grad_method(tape, idx)
     77 # For parameter-shift compatible CV gates, we need to check both the
     78 # intervening gates, and the type of the observable.
     79 best_method = "A"
---> 81 if any(not k.supports_heisenberg for k in ops_between):
     82     # non-Gaussian operators present in-between the operation
     83     # and the observable. Must fallback to numeric differentiation.
     84     best_method = "F"
     86 elif m.obs.ev_order == 2:

In your case, k is the Adjoint instance produced by qml.adjoint, and it doesn’t have a supports_heisenberg attribute, which is what the CV parameter-shift gradient analysis checks for. Here’s a documentation search listing the operators that do have that attribute: Search — PennyLane 0.35.0 documentation
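
As a quick check (a minimal sketch, assuming your PennyLane version behaves like the one in the traceback), you can see that a plain CV gate exposes the attribute while the Adjoint wrapper does not:

import pennylane as qml

sq = qml.Squeezing(0.1, 0.2, wires=0)
print(sq.supports_heisenberg)               # True: Squeezing is a Gaussian CV gate

adj = qml.adjoint(qml.Squeezing(0.1, 0.2, wires=0))
print(hasattr(adj, "supports_heisenberg"))  # False in your version, hence the AttributeError above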

Let me know if that helps!