DeviceError: Observable QuadX not supported on device strawberryfields.fock

When I ran this demo,

the following error occurred:

---------------------------------------------------------------------------
DeviceError                               Traceback (most recent call last)
Cell In[12], line 5
      3 var = var_init
      4 for it in range(500):
----> 5     (var, _, _), _cost = opt.step_and_cost(cost, var, X, Y)
      6     print("Iter: {:5d} | Cost: {:0.7f} ".format(it, _cost))

File d:\miniconda3\lib\site-packages\pennylane\optimize\gradient_descent.py:64, in GradientDescentOptimizer.step_and_cost(self, objective_fn, grad_fn, *args, **kwargs)
     44 def step_and_cost(self, objective_fn, *args, grad_fn=None, **kwargs):
     45     """Update trainable arguments with one step of the optimizer and return the corresponding
     46     objective function value prior to the step.
     47 
   (...)
     61         If single arg is provided, list [array] is replaced by array.
     62     """
---> 64     g, forward = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     65     new_args = self.apply_grad(g, args)
     67     if forward is None:

File d:\miniconda3\lib\site-packages\pennylane\optimize\gradient_descent.py:122, in GradientDescentOptimizer.compute_grad(objective_fn, args, kwargs, grad_fn)
    104 r"""Compute gradient of the objective function at the given point and return it along with
    105 the objective function forward pass (if available).
    106 
   (...)
    119     will not be evaluted and instead ``None`` will be returned.
    120 """
    121 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
--> 122 grad = g(*args, **kwargs)
    123 forward = getattr(g, "forward", None)
    125 num_trainable_args = sum(getattr(arg, "requires_grad", False) for arg in args)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:165, in grad.__call__(self, *args, **kwargs)
    162     self._forward = self._fun(*args, **kwargs)
    163     return ()
--> 165 grad_value, ans = grad_fn(*args, **kwargs)  # pylint: disable=not-callable
    166 self._forward = ans
    168 return grad_value

File d:\miniconda3\lib\site-packages\autograd\wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:183, in grad._grad_with_forward(fun, x)
    177 @staticmethod
    178 @unary_to_nary
    179 def _grad_with_forward(fun, x):
    180     """This function is a replica of ``autograd.grad``, with the only
    181     difference being that it returns both the gradient *and* the forward pass
    182     value."""
--> 183     vjp, ans = _make_vjp(fun, x)  # pylint: disable=redefined-outer-name
    185     if vspace(ans).size != 1:
    186         raise TypeError(
    187             "Grad only applies to real scalar-output functions. "
    188             "Try jacobian, elementwise_grad or holomorphic_grad."
    189         )

File d:\miniconda3\lib\site-packages\autograd\core.py:10, in make_vjp(fun, x)
      8 def make_vjp(fun, x):
      9     start_node = VJPNode.new_root()
---> 10     end_value, end_node =  trace(start_node, fun, x)
     11     if end_node is None:
     12         def vjp(g): return vspace(x).zeros()

File d:\miniconda3\lib\site-packages\autograd\tracer.py:10, in trace(start_node, fun, x)
      8 with trace_stack.new_trace() as t:
      9     start_box = new_box(x, t, start_node)
---> 10     end_box = fun(start_box)
     11     if isbox(end_box) and end_box._trace == start_box._trace:
     12         return end_box._value, end_box._node

File d:\miniconda3\lib\site-packages\autograd\wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
     13 else:
     14     subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)

Cell In[6], line 2, in cost(var, features, labels)
      1 def cost(var, features, labels):
----> 2     preds = [quantum_neural_net(var, x) for x in features]
      3     return square_loss(labels, preds)

Cell In[6], line 2, in <listcomp>(.0)
      1 def cost(var, features, labels):
----> 2     preds = [quantum_neural_net(var, x) for x in features]
      3     return square_loss(labels, preds)

File d:\miniconda3\lib\site-packages\pennylane\qnode.py:1039, in QNode.__call__(self, *args, **kwargs)
   1034         full_transform_program._set_all_argnums(
   1035             self, args, kwargs, argnums
   1036         )  # pylint: disable=protected-access
   1038 # pylint: disable=unexpected-keyword-arg
-> 1039 res = qml.execute(
   1040     (self._tape,),
   1041     device=self.device,
   1042     gradient_fn=self.gradient_fn,
   1043     interface=self.interface,
   1044     transform_program=full_transform_program,
   1045     config=config,
   1046     gradient_kwargs=self.gradient_kwargs,
   1047     override_shots=override_shots,
   1048     **self.execute_kwargs,
   1049 )
   1051 res = res[0]
   1053 # convert result to the interface in case the qfunc has no parameters

File d:\miniconda3\lib\site-packages\pennylane\interfaces\execution.py:808, in execute(tapes, device, gradient_fn, interface, transform_program, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp)
    803 ml_boundary_execute = _get_ml_boundary_execute(
    804     interface, _grad_on_execution, config.use_device_jacobian_product
    805 )
    807 if interface in jpc_interfaces:
--> 808     results = ml_boundary_execute(tapes, execute_fn, jpc, device=device)
    809 else:
    810     results = ml_boundary_execute(
    811         tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff
    812     )

File d:\miniconda3\lib\site-packages\pennylane\interfaces\autograd.py:147, in autograd_execute(tapes, execute_fn, jpc, device)
    142 # pylint misidentifies autograd.builtins as a dict
    143 # pylint: disable=no-member
    144 parameters = autograd.builtins.tuple(
    145     [autograd.builtins.list(t.get_parameters()) for t in tapes]
    146 )
--> 147 return _execute(parameters, tuple(tapes), execute_fn, jpc)

File d:\miniconda3\lib\site-packages\autograd\tracer.py:44, in primitive.<locals>.f_wrapped(*args, **kwargs)
     42 parents = tuple(box._node for _     , box in boxed_args)
     43 argnums = tuple(argnum    for argnum, _   in boxed_args)
---> 44 ans = f_wrapped(*argvals, **kwargs)
     45 node = node_constructor(ans, f_wrapped, argvals, kwargs, argnums, parents)
     46 return new_box(ans, trace, node)

File d:\miniconda3\lib\site-packages\autograd\tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
     46     return new_box(ans, trace, node)
     47 else:
---> 48     return f_raw(*args, **kwargs)

File d:\miniconda3\lib\site-packages\pennylane\interfaces\autograd.py:168, in _execute(parameters, tapes, execute_fn, jpc)
    150 @autograd.extend.primitive
    151 def _execute(
    152     parameters,
   (...)
    155     jpc,
    156 ):  # pylint: disable=unused-argument
    157     """Autodifferentiable wrapper around a way of executing tapes.
    158 
    159     Args:
   (...)
    166 
    167     """
--> 168     return execute_fn(tapes)

File d:\miniconda3\lib\site-packages\pennylane\interfaces\execution.py:261, in _make_inner_execute.<locals>.inner_execute(tapes, **_)
    259 if numpy_only:
    260     tapes = tuple(qml.transforms.convert_to_numpy_parameters(t) for t in tapes)
--> 261 return cached_device_execution(tapes)

File d:\miniconda3\lib\site-packages\pennylane\interfaces\execution.py:383, in cache_execute.<locals>.wrapper(tapes, **kwargs)
    378         return (res, []) if return_tuple else res
    380 else:
    381     # execute all unique tapes that do not exist in the cache
    382     # convert to list as new device interface returns a tuple
--> 383     res = list(fn(tuple(execution_tapes.values()), **kwargs))
    385 final_res = []
    387 for i, tape in enumerate(tapes):

File d:\miniconda3\lib\contextlib.py:79, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
     76 @wraps(func)
     77 def inner(*args, **kwds):
     78     with self._recreate_cm():
---> 79         return func(*args, **kwds)

File d:\miniconda3\lib\site-packages\pennylane\_device.py:532, in Device.batch_execute(self, circuits)
    527 for circuit in circuits:
    528     # we need to reset the device here, else it will
    529     # not start the next computation in the zero state
    530     self.reset()
--> 532     res = self.execute(circuit.operations, circuit.measurements)
    533     results.append(res)
    535 if self.tracker.active:

File d:\miniconda3\lib\site-packages\pennylane\_device.py:437, in Device.execute(self, queue, observables, parameters, **kwargs)
    414 def execute(self, queue, observables, parameters=None, **kwargs):
    415     """Execute a queue of quantum operations on the device and then measure the given observables.
    416 
    417     For plugin developers: Instead of overwriting this, consider implementing a suitable subset of
   (...)
    435         array[float]: measured value(s)
    436     """
--> 437     self.check_validity(queue, observables)
    438     self._op_queue = queue
    439     self._obs_queue = observables

File d:\miniconda3\lib\site-packages\pennylane\_device.py:1011, in Device.check_validity(self, queue, observables)
   1008 observable_name = o.name
   1010 if not self.supports_observable(observable_name):
-> 1011     raise DeviceError(
   1012         f"Observable {observable_name} not supported on device {self.short_name}"
   1013     )

DeviceError: Observable QuadX not supported on device strawberryfields.fock

qml.about()

Name: PennyLane
Version: 0.34.0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/PennyLaneAI/pennylane
Author: 
Author-email: 
License: Apache License 2.0
Location: d:\miniconda3\lib\site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane-Lightning, PennyLane-SF

Platform info:           Windows-10-10.0.22000-SP0
Python version:          3.9.1
Numpy version:           1.23.5
Scipy version:           1.12.0
Installed devices:
- default.gaussian (PennyLane-0.34.0)
- default.mixed (PennyLane-0.34.0)
- default.qubit (PennyLane-0.34.0)
- default.qubit.autograd (PennyLane-0.34.0)
- default.qubit.jax (PennyLane-0.34.0)
- default.qubit.legacy (PennyLane-0.34.0)
- default.qubit.tf (PennyLane-0.34.0)
- default.qubit.torch (PennyLane-0.34.0)
- default.qutrit (PennyLane-0.34.0)
- null.qubit (PennyLane-0.34.0)
- strawberryfields.fock (PennyLane-SF-0.29.1)
- strawberryfields.gaussian (PennyLane-SF-0.29.1)
- strawberryfields.gbs (PennyLane-SF-0.29.1)
- strawberryfields.remote (PennyLane-SF-0.29.1)
- strawberryfields.tf (PennyLane-SF-0.29.1)
- lightning.qubit (PennyLane-Lightning-0.34.0)

Hello @RX1 !

In the demo there is a warning:

It seems that in your current environment, the installed PennyLane version is 0.34. I suggest you create a clean environment with PennyLane version 0.29.

I hope it helps! :wink: