-
Notifications
You must be signed in to change notification settings - Fork 527
/
Copy path_config.py
104 lines (85 loc) · 4.18 KB
/
_config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
import torch
from executorch.exir.dynamic_shape import DynamicMemoryPlanningMode
from executorch.exir.pass_manager import PassType
from executorch.exir.passes import MemoryPlanningPass, ToOutVarPass
from executorch.exir.passes.sym_shape_eval_pass import ConstraintBasedSymShapeEvalPass
from executorch.exir.tracer import ExirDynamoConfig
from torch.fx._compatibility import compatibility
@compatibility(is_backward_compatible=False)
@dataclass
class CaptureConfig:
    """Configuration for the exir capture/tracing entry point."""

    # Trace with the PT2 stack.
    pt2_mode: bool = True
    # Run functionalization as part of capture.
    enable_functionalization: bool = True
    # Trace with dynamic shapes. Ignored when enable_aot is True.
    enable_dynamic_shape: bool = False
    # Capture via AOT export; when True this implies automatic dynamic
    # shapes through the default dynamo config.
    enable_aot: bool = False
    # Dynamo configuration used during tracing.
    _dynamo_config: "ExirDynamoConfig" = field(default_factory=ExirDynamoConfig)
    # Unlift the exported program. Ignored when enable_aot is False.
    _unlift: bool = False
    # Fall back to the old decomposition table.
    _use_old_decomp_table: bool = False
@compatibility(is_backward_compatible=False)
@dataclass
class EdgeCompileConfig:
    """Configuration for converting a captured program to the edge dialect."""

    # Validate the IR during conversion.
    # TODO(qihan): remove ability to opt out
    _check_ir_validity: bool = True
    # Convert ATen ops to edge-dialect ops.
    # TODO(larryliu): remove this
    _use_edge_ops: bool = True
    # Ops exempted from the core ATen ops check; every other check still runs.
    _core_aten_ops_exception_list: List[torch._ops.OpOverload] = field(
        default_factory=list
    )
    # Skip type promotion during conversion.
    _skip_type_promotion: bool = False
    # Skip dim-order handling.
    # TODO(gasoonjia): remove this
    _skip_dim_order: bool = False
@compatibility(is_backward_compatible=False)
@dataclass
class ExecutorchBackendConfig:
    """Configuration for lowering an edge program to an ExecuTorch program.

    NOTE(fix): the pass-valued defaults below previously used class-level
    instances (e.g. ``MemoryPlanningPass()``) evaluated once at class
    definition, so every ``ExecutorchBackendConfig`` shared the same pass
    objects and any state mutated on one config's pass leaked into all
    others. They now use ``field(default_factory=...)`` so each config
    gets its own fresh pass instance; the default *value* is unchanged.
    """

    # Passes to run during lowering.
    passes: List[PassType] = field(default_factory=list)
    # A single memory planning pass can be defined for all the programs in the
    # EdgeProgramManager or can be defined per program.
    memory_planning_pass: Union[PassType, Dict[str, PassType]] = field(
        default_factory=MemoryPlanningPass
    )
    # Pass converting ops to their out-variant form.
    to_out_var_pass: PassType = field(
        default_factory=lambda: ToOutVarPass(ignore_to_out_var_failure=False)
    )
    # Enum member: immutable, safe as a plain class-level default.
    dynamic_memory_planning_mode: DynamicMemoryPlanningMode = (
        DynamicMemoryPlanningMode.UPPER_BOUND
    )
    # Emit stacktrace information into the program.
    emit_stacktrace: bool = False
    # Whether to move delegate data blobs from the Program into separate
    # segments, rather than encoding those blobs in the flatbuffer data.
    # This makes it possible to free those blobs at runtime.
    extract_delegate_segments: bool = True
    # When extracting segments, the starting offset of each segment will be
    # aligned to this value (in bytes). Must be a power of two.
    segment_alignment: int = 128
    # If provided, the minimum alignment of tensor buffers in the program. Must
    # be a power of 2. If not provided, uses the value in the schema file.
    constant_tensor_alignment: Optional[int] = None
    # If provided, the minimum alignment of delegate data in the program. Must
    # be a power of 2. If not provided, uses the value in the schema file.
    delegate_alignment: Optional[int] = None
    # A single sym shape eval pass can be defined for all the programs in the
    # EdgeProgramManager or can be defined per program.
    sym_shape_eval_pass: Union[PassType, Dict[str, PassType]] = field(
        default_factory=ConstraintBasedSymShapeEvalPass
    )
    # If set to true, view_copy operations will be converted to lightweight
    # view operations in the ET runtime
    # Moreover, static views will be elided from the ExecuTorch graph
    remove_view_copy: bool = True
    # If set to true, all constant tensors will be stored in a separate file,
    # external to the PTE file.
    external_constants: bool = False
    # If set to true, all trainable weights will be stored in a separate file,
    # external to the PTE file.
    external_mutable_weights: bool = False
    # If set to true, all mutable buffers will have their fully qualified names
    # serialized in the PTE file. Its value is ignored if mutable buffers are not
    # memory planned as the names must be serialized in that case.
    emit_mutable_buffer_names: bool = False