I'm getting a `TypeError: len() of unsized object` when training the photonic circuit below (full traceback at the end of the post).

```python
import pennylane as qml
from pennylane import numpy as np
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score


def random_params(num_wires):
    """Generate random variational parameters in the shape the ansatz expects."""
    return np.random.uniform(0, 2 * np.pi, num_wires, requires_grad=True)

params_init = random_params(14)
print(params_init)

num_modes = 2
cutoff_dim = 10

# select a device (the Fock backend from the PennyLane-SF plugin)
dev = qml.device("strawberryfields.fock", wires=num_modes, cutoff_dim=cutoff_dim)

@qml.qnode(dev, interface="tf")
def PhotonicAttention(data, para):
    # Data-encoding layer
    qml.Squeezing(data[0], data[1], wires=0)
    qml.Squeezing(data[2], data[3], wires=1)
    qml.Beamsplitter(data[4], data[5], wires=[0, 1])
    qml.Rotation(data[6], wires=0)
    qml.Rotation(data[7], wires=1)
    qml.Displacement(data[8], data[9], wires=0)
    qml.Displacement(data[10], data[11], wires=1)
    qml.Kerr(data[12], wires=0)
    qml.Kerr(data[13], wires=1)

    # Interferometer 1
    qml.Beamsplitter(para[0], para[1], wires=[0, 1])
    qml.Rotation(para[2], wires=0)
    qml.Rotation(para[3], wires=1)

    # Squeezers
    qml.Squeezing(para[4], 0.0, wires=0)
    qml.Squeezing(para[5], 0.0, wires=1)

    # Interferometer 2
    qml.Beamsplitter(para[6], para[7], wires=[0, 1])
    qml.Rotation(para[8], wires=0)
    qml.Rotation(para[9], wires=1)

    # Bias addition
    qml.Displacement(para[10], 0.0, wires=0)
    qml.Displacement(para[11], 0.0, wires=1)

    # Non-linear activation function
    qml.Kerr(para[12], wires=0)
    qml.Kerr(para[13], wires=1)
    return qml.expval(qml.NumberOperator(1))
```
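(`feats_train`, `Y_train`, `feats_val`, `Y_val`, and `train_size` come from preprocessing cells I haven't pasted here. If you want to run the snippet, a random stand-in with the same shapes would be something like:)

```python
# Purely illustrative stand-in for the preprocessed data (not the real
# dataset): 14 features per sample, labels in {-1, +1}.
train_size = 50
feats_train = np.random.uniform(0, 2 * np.pi, (2 * train_size, 14), requires_grad=False)
Y_train = np.array(np.random.choice([-1, 1], size=2 * train_size), requires_grad=False)
feats_val = np.random.uniform(0, 2 * np.pi, (20, 14), requires_grad=False)
Y_val = np.array(np.random.choice([-1, 1], size=20), requires_grad=False)
```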

```python
# qml.about()

qml.drawer.use_style("black_white")
fig, ax = qml.draw_mpl(PhotonicAttention, show_all_wires=True)(feats_train[0], params_init)
fig.set_size_inches((7, 5))
print(qml.specs(PhotonicAttention)(feats_train[0], params_init))


def variational_classifier(params, feat):
    return PhotonicAttention(feat, params)


def cost(params, features, labels):
    predictions = [variational_classifier(params, feat) for feat in features]
    return square_loss(labels, predictions)
```
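(`square_loss` is defined in an earlier cell I haven't pasted; it's essentially the standard mean-squared-error helper, along the lines of the PennyLane variational-classifier demo:)

```python
def square_loss(labels, predictions):
    """Mean squared error between target labels and circuit outputs."""
    loss = 0.0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    return loss / len(labels)
```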

```python
def TrainQKSAN(params_init):
    opt = qml.NesterovMomentumOptimizer(0.05)
    batch_size = 30
    weights = params_init
    his_acc_val = []
    his_acc_train = []
    his_cost = []
    f1_train = []
    f1_test = []
    roc_auc_train = []
    roc_auc_test = []

    for it in range(120):
        # Update the weights by one optimizer step on a random mini-batch
        batch_index = np.random.randint(0, 2 * train_size, (batch_size,))
        feats_train_batch = feats_train[batch_index]
        Y_train_batch = Y_train[batch_index]

        weights, _, _ = opt.step(cost, weights, feats_train_batch, Y_train_batch)
        # (weights, _, _), _cost = opt.step_and_cost(cost, weights, feats_train_batch, Y_train_batch)

        # Compute predictions on the train and validation sets
        predictions_train = [np.sign(variational_classifier(weights, feat)) for feat in feats_train]
        predictions_val = [np.sign(variational_classifier(weights, feat)) for feat in feats_val]

        # `features` and `Y` hold the full dataset (defined in an earlier cell, not shown)
        c = cost(weights, features, Y).numpy()

        # Compute accuracy, F1, and ROC-AUC on the train and validation sets
        acc_train = accuracy_score(Y_train, predictions_train)
        acc_val = accuracy_score(Y_val, predictions_val)
        a = f1_score(Y_train, predictions_train)
        b = f1_score(Y_val, predictions_val)
        print("f1_score is", a, b)

        d = roc_auc_score(Y_train, predictions_train)
        e = roc_auc_score(Y_val, predictions_val)
        print("roc_auc_score is", d, e)

        f1_train.append(a)
        f1_test.append(b)
        roc_auc_train.append(d)
        roc_auc_test.append(e)
        his_acc_val.append(acc_val)
        his_acc_train.append(acc_train)
        his_cost.append(c)
        print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f}"
              .format(it + 1, c, acc_train, acc_val))

    return his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test


his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test = TrainQKSAN(params_init)

print(his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test)
```

Full traceback (abridged; the repetitive TensorFlow tensor-conversion frames are trimmed):

```
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[11], line 1
----> 1 his_acc_val, his_acc_train, his_cost, f1_train, f1_test, roc_auc_train, roc_auc_test = TrainQKSAN(params_init)

Cell In[10], line 28, in TrainQKSAN(params_init)
---> 28 weights, _, _ = opt.step(cost, weights, feats_train_batch, Y_train_batch)

File d:\miniconda3\lib\site-packages\pennylane\optimize\gradient_descent.py:93, in GradientDescentOptimizer.step(self, objective_fn, grad_fn, *args, **kwargs)
---> 93 g, _ = self.compute_grad(objective_fn, args, kwargs, grad_fn=grad_fn)

File d:\miniconda3\lib\site-packages\pennylane\optimize\nesterov_momentum.py:76, in NesterovMomentumOptimizer.compute_grad(self, objective_fn, args, kwargs, grad_fn)
---> 76 grad = g(*shifted_args, **kwargs)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:165, in grad.__call__(self, *args, **kwargs)
--> 165 grad_value, ans = grad_fn(*args, **kwargs)

File d:\miniconda3\lib\site-packages\pennylane\_grad.py:183, in grad._grad_with_forward(fun, x)
--> 183 vjp, ans = _make_vjp(fun, x)

File d:\miniconda3\lib\site-packages\autograd\core.py:10, in make_vjp(fun, x)
---> 10 end_value, end_node = trace(start_node, fun, x)

File d:\miniconda3\lib\site-packages\autograd\tracer.py:10, in trace(start_node, fun, x)
---> 10 end_box = fun(start_box)

Cell In[10], line 6, in cost(params, features, labels)
----> 6 predictions = [variational_classifier(params, feat) for feat in features]

Cell In[10], line 2, in variational_classifier(params, feat)
----> 2 return PhotonicAttention(feat, params)

File d:\miniconda3\lib\site-packages\pennylane\qnode.py:1039, in QNode.__call__(self, *args, **kwargs)
-> 1039 res = qml.execute(

File d:\miniconda3\lib\site-packages\pennylane\interfaces\execution.py:810, in execute(...)
--> 810 results = ml_boundary_execute(

File d:\miniconda3\lib\site-packages\pennylane\interfaces\tensorflow.py:322, in execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n, max_diff)
--> 322 return _execute(*parameters)

[... TensorFlow frames: custom_gradient.py -> ops.convert_to_tensor -> constant_op.py ...]

File d:\miniconda3\lib\site-packages\tensorflow\python\framework\constant_op.py:103, in convert_to_eager_tensor(value, ctx, dtype)
--> 103 return ops.EagerTensor(value, ctx.device_name, dtype)

ValueError: TypeError: len() of unsized object
Traceback (most recent call last):
  File "d:\miniconda3\lib\site-packages\autograd\numpy\numpy_boxes.py", line 22, in __len__
    def __len__(self): return len(self._value)
TypeError: len() of unsized object
```


Hey @RX1,

Can you reduce this down to something much smaller that reproduces what you’re seeing?
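For example, something along these lines (just an untested sketch: one mode, one gate, same `interface="tf"` QNode driven by the autograd-based `NesterovMomentumOptimizer`) would make it much easier to pin down where the interface mismatch happens:

```python
# Untested sketch of a minimal reproducer: keeps only the optimizer/interface
# combination from the original code, with a single gate and parameter.
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=5)

@qml.qnode(dev, interface="tf")
def circuit(w):
    qml.Displacement(w[0], 0.0, wires=0)
    return qml.expval(qml.NumberOperator(0))

w = np.array([0.1], requires_grad=True)
opt = qml.NesterovMomentumOptimizer(0.05)
w = opt.step(circuit, w)
print(w)
```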