PennyLane-SF 0.29.1: AttributeError: 'NoneType' object has no attribute '_trunc'

I have been using my favorite plugin, PennyLane-SF 0.29, to develop some quantum demos for my company, but the following error occurs after constructing a photonic circuit:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[10], line 1
----> 1 his_acc_val1, his_acc_train1, his_cost1 = TrainQKSAN(params_init)
      3 print(his_acc_val1, his_acc_train1, his_cost1)

Cell In[9], line 17, in TrainQKSAN(params_init)
     15 Y_train_batch = Y_train[batch_index]
     16 #cost(params, bias, features, labels):
---> 17 weights, _, _ = opt.step(cost, weights, feats_train_batch, Y_train_batch)
     19 # Compute predictions on train and validation set
     20 predictions_train = [np.sign(PGKSAN(inputs, weights)) for inputs in feats_train]

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/optimize/gradient_descent.py:88, in GradientDescentOptimizer.step(self, objective_fn, grad_fn, *args, **kwargs)
     70 def step(self, objective_fn, *args, grad_fn=None, **kwargs):
     71     """Update trainable arguments with one step of the optimizer.
     72 
     73     Args:
   (...)
     85         If single arg is provided, list [array] is replaced by array.
     86     """
---> 88     g, _ = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)
     89     new_args = self.apply_grad(g, args)
     91     # unwrap from list if one argument, cleaner return

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/optimize/nesterov_momentum.py:71, in NesterovMomentumOptimizer.compute_grad(self, objective_fn, args, kwargs, grad_fn)
     68         shifted_args[index] = args[index] - self.momentum * self.accumulation[index]
     70 g = get_gradient(objective_fn) if grad_fn is None else grad_fn
---> 71 grad = g(*shifted_args, **kwargs)
     72 forward = getattr(g, "forward", None)
     74 grad = (grad,) if len(trainable_indices) == 1 else grad

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_grad.py:115, in grad.__call__(self, *args, **kwargs)
    112     self._forward = self._fun(*args, **kwargs)
    113     return ()
--> 115 grad_value, ans = grad_fn(*args, **kwargs)
    116 self._forward = ans
    118 return grad_value

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_grad.py:133, in grad._grad_with_forward(fun, x)
    127 @staticmethod
    128 @unary_to_nary
    129 def _grad_with_forward(fun, x):
    130     """This function is a replica of ``autograd.grad``, with the only
    131     difference being that it returns both the gradient *and* the forward pass
    132     value."""
--> 133     vjp, ans = _make_vjp(fun, x)
    135     if not vspace(ans).size == 1:
    136         raise TypeError(
    137             "Grad only applies to real scalar-output functions. "
    138             "Try jacobian, elementwise_grad or holomorphic_grad."
    139         )

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/core.py:10, in make_vjp(fun, x)
      8 def make_vjp(fun, x):
      9     start_node = VJPNode.new_root()
---> 10     end_value, end_node =  trace(start_node, fun, x)
     11     if end_node is None:
     12         def vjp(g): return vspace(x).zeros()

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:10, in trace(start_node, fun, x)
      8 with trace_stack.new_trace() as t:
      9     start_box = new_box(x, t, start_node)
---> 10     end_box = fun(start_box)
     11     if isbox(end_box) and end_box._trace == start_box._trace:
     12         return end_box._value, end_box._node

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
     13 else:
     14     subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)

Cell In[8], line 32, in cost(params, features, labels)
     31 def cost(params, features, labels):
---> 32     predictions = [PGKSAN(inputs, params) for inputs in features]
     33     return square_loss(labels, predictions)

Cell In[8], line 32, in <listcomp>(.0)
     31 def cost(params, features, labels):
---> 32     predictions = [PGKSAN(inputs, params) for inputs in features]
     33     return square_loss(labels, predictions)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/qnode.py:889, in QNode.__call__(self, *args, **kwargs)
    885     self._update_original_device()
    887     return res
--> 889 res = qml.execute(
    890     [self.tape],
    891     device=self.device,
    892     gradient_fn=self.gradient_fn,
    893     interface=self.interface,
    894     gradient_kwargs=self.gradient_kwargs,
    895     override_shots=override_shots,
    896     **self.execute_kwargs,
    897 )
    899 if old_interface == "auto":
    900     self.interface = "auto"

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:729, in execute(tapes, device, gradient_fn, interface, mode, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform)
    723 except ImportError as e:
    724     raise qml.QuantumFunctionError(
    725         f"{mapped_interface} not found. Please install the latest "
    726         f"version of {mapped_interface} to enable the '{mapped_interface}' interface."
    727     ) from e
--> 729 res = _execute(
    730     tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode
    731 )
    733 return batch_fn(res)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/autograd.py:81, in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff, mode)
     75 # pylint misidentifies autograd.builtins as a dict
     76 # pylint: disable=no-member
     77 parameters = autograd.builtins.tuple(
     78     [autograd.builtins.list(t.get_parameters()) for t in tapes]
     79 )
---> 81 return _execute(
     82     parameters,
     83     tapes=tapes,
     84     device=device,
     85     execute_fn=execute_fn,
     86     gradient_fn=gradient_fn,
     87     gradient_kwargs=gradient_kwargs,
     88     _n=_n,
     89     max_diff=max_diff,
     90 )[0]

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:44, in primitive.<locals>.f_wrapped(*args, **kwargs)
     42 parents = tuple(box._node for _     , box in boxed_args)
     43 argnums = tuple(argnum    for argnum, _   in boxed_args)
---> 44 ans = f_wrapped(*argvals, **kwargs)
     45 node = node_constructor(ans, f_wrapped, argvals, kwargs, argnums, parents)
     46 return new_box(ans, trace, node)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/autograd/tracer.py:48, in primitive.<locals>.f_wrapped(*args, **kwargs)
     46     return new_box(ans, trace, node)
     47 else:
---> 48     return f_raw(*args, **kwargs)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/autograd.py:125, in _execute(parameters, tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff)
    104 """Autodifferentiable wrapper around ``Device.batch_execute``.
    105 
    106 The signature of this function is designed to work around Autograd restrictions.
   (...)
    122 understand the consequences!
    123 """
    124 with qml.tape.Unwrap(*tapes):
--> 125     res, jacs = execute_fn(tapes, **gradient_kwargs)
    127 for i, r in enumerate(res):
    128     if any(isinstance(m, CountsMP) for m in tapes[i].measurements):

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:205, in cache_execute.<locals>.wrapper(tapes, **kwargs)
    201         return (res, []) if return_tuple else res
    203 else:
    204     # execute all unique tapes that do not exist in the cache
--> 205     res = fn(execution_tapes.values(), **kwargs)
    207 final_res = []
    209 for i, tape in enumerate(tapes):

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/interfaces/execution.py:131, in cache_execute.<locals>.fn(tapes, **kwargs)
    129 def fn(tapes: Sequence[QuantumTape], **kwargs):  # pylint: disable=function-redefined
    130     tapes = [expand_fn(tape) for tape in tapes]
--> 131     return original_fn(tapes, **kwargs)

File /opt/conda/envs/penny-sf/lib/python3.9/contextlib.py:79, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
     76 @wraps(func)
     77 def inner(*args, **kwds):
     78     with self._recreate_cm():
---> 79         return func(*args, **kwds)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane/_device.py:528, in Device.batch_execute(self, circuits)
    524 results = []
    525 for circuit in circuits:
    526     # we need to reset the device here, else it will
    527     # not start the next computation in the zero state
--> 528     self.reset()
    530     res = self.execute(circuit.operations, circuit.observables)
    531     results.append(res)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/pennylane_sf/simulator.py:175, in StrawberryFieldsSimulator.reset(self)
    172 sf.hbar = self.hbar
    174 if self.eng is not None:
--> 175     self.eng.reset()
    176     self.eng = None
    178 if self.state is not None:

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/engine.py:416, in LocalEngine.reset(self, backend_options)
    414 backend_options = backend_options or {}
    415 super().reset(backend_options)
--> 416 self.backend.reset(**self.backend_options)

File /opt/conda/envs/penny-sf/lib/python3.9/site-packages/strawberryfields/backends/fockbackend/backend.py:145, in FockBackend.reset(self, pure, **kwargs)
    144 def reset(self, pure=True, **kwargs):
--> 145     cutoff = kwargs.get("cutoff_dim", self.circuit._trunc)
    146     self._modemap.reset()
    147     self.circuit.reset(pure, num_subsystems=self._init_modes, cutoff_dim=cutoff)

AttributeError: 'NoneType' object has no attribute '_trunc'
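
While I can't post the actual circuit, the training loop has roughly the structure sketched below. Everything in this snippet (the circuit body, data, and hyperparameters) is a simplified placeholder, not my real code:

```python
# Rough, simplified stand-in for the training code. PGKSAN is a QNode that
# returns a single expectation value, and the cost is a squared loss over a batch.
import pennylane as qml
from pennylane import numpy as np

# Photonic device from the PennyLane-SF plugin
dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=5)

@qml.qnode(dev)
def PGKSAN(inputs, params):
    # Placeholder photonic circuit; the real one is more involved
    qml.Displacement(inputs[0], 0.0, wires=0)
    qml.Displacement(inputs[1], 0.0, wires=1)
    qml.Beamsplitter(params[0], params[1], wires=[0, 1])
    return qml.expval(qml.NumberOperator(0))

def square_loss(labels, predictions):
    # Mean squared error, written as a plain loop so Autograd can trace it
    loss = 0.0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    return loss / len(labels)

def cost(params, features, labels):
    predictions = [PGKSAN(inputs, params) for inputs in features]
    return square_loss(labels, predictions)

opt = qml.NesterovMomentumOptimizer(0.01)
params = np.array([0.1, 0.2], requires_grad=True)
feats_train_batch = np.array([[0.1, 0.2], [0.3, 0.4]], requires_grad=False)
Y_train_batch = np.array([1.0, -1.0], requires_grad=False)

# The AttributeError in the traceback above is raised from inside this call
params, _, _ = opt.step(cost, params, feats_train_batch, Y_train_batch)
```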

Due to company confidentiality, I can't share my actual code publicly. If any developer would be willing to help me privately, I would be very grateful. I wish PennyLane continued success.

Hi @RX1,

This looks like either an issue in Autograd itself or an incompatibility between your installed package versions. I would recommend downgrading your version of Autograd to see if that fixes the issue.
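
To help narrow this down, could you check which versions you have installed? Something like the following should do it (`qml.about()` prints the PennyLane version along with installed plugins and devices):

```python
# Quick check of the package versions involved in the traceback
from importlib.metadata import version

import pennylane as qml
import strawberryfields as sf

print("autograd:", version("autograd"))
print("strawberryfields:", sf.__version__)
qml.about()  # PennyLane version, installed plugins, devices, Python/NumPy info
```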