python.builtin
==============

dynamic_shape_round
^^^^^^^^^^^^^^^^^^^

.. note::

    Tags: :doc:`python.builtin <python.builtin>`, :doc:`torch.dynamic-shape <torch.dynamic-shape>`

    Support Level: NOT_SUPPORTED_YET

Original source code:

.. code-block:: python

    import torch

    from torch._export import dynamic_dim

    x = torch.ones(3, 2)
    dynamic_constraint = dynamic_dim(x, 0)

    def dynamic_shape_round(x):
        """
        Calling round() on dynamic shapes is not supported.
        """
        return x[: round(x.shape[0] / 2)]

Result:

.. code-block::

    Unsupported: Calling round() on symbolic value is not supported. You can use floor() to implement this functionality

tensor_setattr
^^^^^^^^^^^^^^

.. note::

    Tags: :doc:`python.builtin <python.builtin>`

    Support Level: SUPPORTED

Original source code:

.. code-block:: python

    import torch

    def tensor_setattr(x, attr):
        """
        setattr() call onto tensors is not supported.
        """
        setattr(x, attr, torch.randn(3, 2))
        return x + 4

Result:

.. code-block::

    ExportedProgram:
        class GraphModule(torch.nn.Module):
            def forward(self, arg0_1: f32[3, 2], arg1_1):
                #
                sym_size_int = torch.ops.aten.sym_size.int(arg0_1, 0)
                sym_size_int_1 = torch.ops.aten.sym_size.int(arg0_1, 1)
                eq = sym_size_int_1 == 2;  sym_size_int_1 = None
                scalar_tensor_default: f32[] = torch.ops.aten.scalar_tensor.default(eq);  eq = None
                _assert_async_msg = torch.ops.aten._assert_async.msg(scalar_tensor_default, 'Input arg0_1.shape[1] is specialized at 2');  scalar_tensor_default = None
                eq_1 = sym_size_int == 3;  sym_size_int = None
                scalar_tensor_default_1: f32[] = torch.ops.aten.scalar_tensor.default(eq_1);  eq_1 = None
                _assert_async_msg_1 = torch.ops.aten._assert_async.msg(scalar_tensor_default_1, 'Input arg0_1.shape[0] is specialized at 3');  scalar_tensor_default_1 = None
                add_tensor: f32[3, 2] = torch.ops.aten.add.Tensor(arg0_1, 4);  arg0_1 = None
                return (add_tensor,)

    Graph Signature: ExportGraphSignature(parameters=[], buffers=[], user_inputs=['arg0_1', 'arg1_1'], user_outputs=['add_tensor'], inputs_to_parameters={}, inputs_to_buffers={}, buffers_to_mutate={}, backward_signature=None, assertion_dep_token=None)
    Symbol to range: {}

type_reflection_method
^^^^^^^^^^^^^^^^^^^^^^

.. note::

    Tags: :doc:`python.builtin <python.builtin>`

    Support Level: NOT_SUPPORTED_YET

Original source code:

.. code-block:: python

    import torch

    class A:
        @classmethod
        def func(cls, x):
            return 1 + x

    def type_reflection_method(x):
        """
        type() calls on custom objects followed by method calls are not allowed
        due to their overly dynamic nature.
        """
        a = A()
        return type(a).func(x)

Result:

.. code-block::

    Unsupported: Can't call type() on generated custom object. Please use __class__ instead

You can rewrite the example above to something like the following:

.. code-block:: python

    def type_reflection_method_rewrite(x):
        """
        Custom object class methods will be inlined.
        """
        return A.func(x)
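If the class cannot be referenced statically, the error message above suggests ``__class__`` as a substitute for ``type()``. A minimal, untested sketch of that variant (``type_reflection_method_class_attr`` is a hypothetical name, not part of the original example set):

.. code-block:: python

    def type_reflection_method_class_attr(x):
        """
        Sketch based on the error message's hint: a.__class__ is the
        suggested alternative to type(a).
        """
        a = A()
        return a.__class__.func(x)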
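The ``dynamic_shape_round`` example above can likewise be rewritten, replacing ``round()`` with the flooring that its error message recommends. A minimal sketch (note the semantics differ for odd sizes: ``3 // 2 == 1`` while ``round(3 / 2) == 2``):

.. code-block:: python

    def dynamic_shape_round_rewrite(x):
        """
        x.shape[0] // 2 lowers to a floor, which is supported on
        symbolic values, unlike round().
        """
        return x[: x.shape[0] // 2]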
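Finally, since ``tensor_setattr`` is a SUPPORTED case, it can be exported directly. A minimal sketch, assuming the public ``torch.export.export`` entry point (the snippets above use the older ``torch._export`` namespace; the module wrapper and example inputs below are illustrative):

.. code-block:: python

    import torch

    class TensorSetattr(torch.nn.Module):
        def forward(self, x, attr):
            # The string attr is specialized as a constant at export time.
            setattr(x, attr, torch.randn(3, 2))
            return x + 4

    # Produces an ExportedProgram like the Result block shown above.
    ep = torch.export.export(TensorSetattr(), (torch.randn(3, 2), "attr"))
    print(ep)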