
Commit 2d35b30

mergennachin authored and facebook-github-bot committed
Update lintrunner libraries (#3963)
Summary: Pull Request resolved: #3963
Reviewed By: kirklandsign
Differential Revision: D58479009
fbshipit-source-id: 31624ace3ccb6ca8c6de6497f8b239eea8d31ad4
1 parent 22b063d commit 2d35b30

13 files changed (+35 −32 lines)


backends/arm/tosa_utils.py (+1 −1)

@@ -33,7 +33,7 @@ def dbg_node(node):
     logger.info("  node.meta = ")
     for k, v in node.meta.items():
         logger.info(f"   '{k}' = {v}")
-        if type([]) == type(v):
+        if isinstance(v, list):
             for i in v:
                 logger.info(f"      {i} ")
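This same mechanical change, exact-type comparison replaced by isinstance, repeats across every file below. A minimal standalone sketch of the semantic difference (the class here is illustrative, not from the commit): isinstance also matches subclasses, which is why linters flag the type(...) == form (pycodestyle's E721).

# Illustrative only: type(x) == T is an exact-class test,
# isinstance(x, T) also accepts subclasses of T.
class MyList(list):
    pass

v = MyList([1, 2, 3])
print(type(v) == list)      # False: the exact type is MyList
print(isinstance(v, list))  # True: MyList is a list subclass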

backends/qualcomm/passes/convert_to_linear.py (+5 −1)

@@ -192,7 +192,11 @@ def _convert(self, graph_module: torch.fx.GraphModule):
         for _, src_partitions in partitions.items():
             for src_partition in src_partitions:
                 op_cnt = Counter(
-                    [n.target for n in src_partition.nodes if type(n.target) == edge_op]
+                    [
+                        n.target
+                        for n in src_partition.nodes
+                        if isinstance(n.target, edge_op)
+                    ]
                 )
                 if self.linear in op_cnt:
                     continue

backends/qualcomm/passes/fold_qdq.py (+1 −1)

@@ -45,7 +45,7 @@ def _fold(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
 
             # collecting quant nodes to be removed
             for i in range(1, len(n.args)):
-                if type(n.args[i]) == torch.fx.node.Node:
+                if isinstance(n.args[i], torch.fx.node.Node):
                     to_be_removed.append(n.args[i])
                     # could be a commonly shared attribute between q & dq
                     if n.args[i].target == exir_ops.edge.aten._to_copy.default:

backends/qualcomm/passes/insert_io_qdq.py (+3 −1)

@@ -47,7 +47,9 @@ def _ceate_args(self, target: torch.fx.node.Target, quant_attrs: Dict):
             if name == "out_dtype":
                 continue
             value = quant_attrs[name]
-            if type(arg_schema.type) == torch.tensor and type(value) in [int, float]:
+            if isinstance(arg_schema.type, torch.tensor) and (
+                isinstance(value, int) or isinstance(value, float)
+            ):
                 value = torch.tensor(value)
             ret.append(value)
         return ret
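As a side note, isinstance accepts a tuple of types, so the two or-ed checks above could be collapsed into one call. A hedged sketch (the helper name is hypothetical); note also that torch.Tensor is the tensor class, while torch.tensor is a factory function:

import torch

def to_tensor_if_scalar(value):
    # Tuple form: one isinstance call covers both scalar types.
    if isinstance(value, (int, float)):
        value = torch.tensor(value)
    return value

assert isinstance(to_tensor_if_scalar(3), torch.Tensor)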

backends/qualcomm/passes/utils.py (+1 −1)

@@ -32,7 +32,7 @@ def get_quant_attrs(
         attr_n = quant_node.args[i]
 
         value = attr_n
-        if type(attr_n) == torch.fx.node.Node:
+        if isinstance(attr_n, torch.fx.node.Node):
            # could be a commonly shared attribute between q & dq
            if attr_n.target == exir_ops.edge.aten._to_copy.default:
                value = get_parameter(attr_n.args[0], edge_program)

backends/qualcomm/quantizer/quantizer.py (+1 −1)

@@ -295,7 +295,7 @@ def _get_quant_config(self, op: str | OpOverload) -> Optional[QuantizationConfig
         1. is one of use_per_channel_weight_quant_ops
         2. int8 / int16 config
         """
-        if type(op) == str:
+        if isinstance(op, str):
             return
 
         if op in self.use_per_channel_weight_quant_ops:

examples/qualcomm/scripts/dummy_llama2.py (+1 −1)

@@ -27,7 +27,7 @@ def create_device_inputs(example_inputs, use_kv_cache):
     input_list = ""
     if use_kv_cache:
         for i, d in enumerate(inputs[0]):
-            if type(d) == list:
+            if isinstance(d, list):
                 d = torch.stack(d)
             d.numpy().tofile(f"{args.artifact}/input_0_0.raw")
             input_list = f"input_0_{i}.raw "

exir/emit/_emitter.py (+9 −10)

@@ -285,18 +285,17 @@ def _emit_list(self, val: List[_Argument], val_type: _SchemaType) -> EValue:
 
         NOTE: When symbool and symfloat are supported bool and float lists will be stored boxed.
         """
-        elem_type = type(val_type)
 
-        if elem_type == torch.BoolType:
+        if isinstance(val_type, torch.BoolType):
             return EValue(BoolList(typing.cast(List[bool], val)))
 
-        if elem_type == torch.IntType:
+        if isinstance(val_type, torch.IntType):
             return self._emit_int_list(val)
 
-        if elem_type == torch.FloatType:
+        if isinstance(val_type, torch.FloatType):
             return EValue(DoubleList(typing.cast(List[float], val)))
 
-        if elem_type == torch.TensorType:
+        if isinstance(val_type, torch.TensorType):
             values = []
             for v in val:
                 assert isinstance(v, _AbstractValue)
@@ -308,10 +307,10 @@ def _emit_list(self, val: List[_Argument], val_type: _SchemaType) -> EValue:
                 values.append(v.id)
             return EValue(TensorList(values))
 
-        if elem_type == torch.OptionalType:
+        if isinstance(val_type, torch.OptionalType):
             # refine further
-            actual_type = typing.cast(torch.OptionalType, val_type).getElementType()
-            if type(actual_type) == torch.TensorType:
+            actual_type = val_type.getElementType()
+            if isinstance(actual_type, torch.TensorType):
                 vals = []
                 for v in val:
                     if v is None:
@@ -437,9 +436,9 @@ def _constant_to_evalue(  # noqa: C901
             val_type = torch.ListType(
                 self._get_list_tuple_jit_type(val)  # pyre-ignore
             )
-            if type(val_type) == torch.OptionalType:
+            if isinstance(val_type, torch.OptionalType):
                 val_type = val_type.getElementType()
-            assert type(val_type) == torch.ListType
+            assert isinstance(val_type, torch.ListType)
             return self._emit_list(
                 typing.cast(List[_Argument], val),
                 typing.cast(_SchemaType, val_type.getElementType()),
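One payoff of the isinstance form is visible in the OptionalType branch above: once isinstance narrows val_type, getElementType() can be called directly and the typing.cast disappears. A small sketch against PyTorch's JIT type objects (the types are constructed here purely for illustration):

import torch

# List[Optional[Tensor]] as a JIT schema type, built for illustration.
list_ty = torch.ListType(torch.OptionalType(torch.TensorType.get()))

elem_ty = list_ty.getElementType()
if isinstance(elem_ty, torch.OptionalType):
    # isinstance narrows the type, so no typing.cast is needed here.
    elem_ty = elem_ty.getElementType()
assert isinstance(elem_ty, torch.TensorType)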

exir/passes/remove_mixed_type_operators.py (+1 −1)

@@ -61,7 +61,7 @@ def call_operator(self, op, args, kwargs, meta: NodeMetadata):  # noqa: C901
         )[1]
 
         def try_coerce(value: PyTree, arg: torch.Argument) -> PyTree:
-            if type(arg.type) != torch.TensorType:
+            if not isinstance(arg.type, torch.TensorType):
                 return value
 
             if isinstance(value, ProxyValue):

exir/passes/scalar_to_tensor_pass.py (+1 −1)

@@ -24,7 +24,7 @@ def try_coerce(value, arg):
         return (
             torch.tensor(value)
             if isinstance(value, (float, int, bool))
-            and type(arg.type) == torch.TensorType
+            and isinstance(arg.type, torch.TensorType)
             else value
         )

exir/tracer.py (+3 −3)

@@ -345,7 +345,7 @@ def __torch_dispatch__(  # noqa: C901
 
         # Kind of a hacky way to test if an op is in-place or not
         if func.__name__[-1] == "_" and func.__name__[0] != "_":
-            if type(args[0]) == PythonTensor:
+            if isinstance(args[0], PythonTensor):
                 args[0].proxy = proxy_out
 
         if not torch.fx.traceback.has_preserved_node_meta():
@@ -361,13 +361,13 @@ def wrap_with_proxy(e: LeafValue, proxy: torch.fx.Proxy) -> LeafValue:
         if e is None:
             e = torch.empty(())
 
-        if type(e) == torch.Tensor:
+        if isinstance(e, torch.Tensor):
             return PythonTensor(e, proxy)
 
         # Inplace and out-variant ops may return one of their arguments, which is already
         # a PythonTensor. In this case, we need to update the PythonTensor's associated
         # proxy to the newly created proxy.
-        if type(e) == PythonTensor:
+        if isinstance(e, PythonTensor):
             e.update_proxy(proxy)
             return e
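The guard touched in the first hunk leans on an ATen naming convention, as the surrounding comment admits. A standalone sketch of that heuristic (function name hypothetical):

def looks_inplace(name: str) -> bool:
    # ATen in-place variants end in a trailing underscore (add_), while
    # private/dunder names begin with one (__add__), hence both checks.
    return name.endswith("_") and not name.startswith("_")

assert looks_inplace("add_")
assert not looks_inplace("__add__")  # dunder, not in-place
assert not looks_inplace("add")      # out-of-place variant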

requirements-lintrunner.txt (+6 −6)

@@ -3,18 +3,18 @@ lintrunner==0.11.0
 lintrunner-adapters==0.11.0
 
 # Flake 8 and its dependencies
-flake8==6.0.0
+flake8==6.1.0
 flake8-breakpoint==1.1.0
-flake8-bugbear==23.6.5
-flake8-comprehensions==3.12.0
+flake8-bugbear==23.9.16
+flake8-comprehensions==3.14.0
 flake8-pyi==23.5.0
 mccabe==0.7.0
-pycodestyle==2.10.0
+pycodestyle==2.11.1
 torchfix==0.5.0
 
 # UFMT
-black==24.2.0
-ufmt==2.5.1
+black==24.4.2
+ufmt==2.6.0
 usort==1.0.5
 
 # Other linters

sdk/bundled_program/test/test_config.py (+2 −4)

@@ -31,10 +31,8 @@ def assertIOListEqual(
     ) -> None:
         self.assertEqual(len(tl1), len(tl2))
         for t1, t2 in zip(tl1, tl2):
-            if type(t1) == torch.Tensor:
-                assert type(t1) == type(t2)
-                # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
-                # `Union[bool, float, int, Tensor]`.
+            if isinstance(t1, torch.Tensor):
+                assert isinstance(t2, torch.Tensor)
                 self.assertTensorEqual(t1, t2)
             else:
                 self.assertTrue(t1 == t2)
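The two pyre-fixme lines could be dropped because the added assert narrows t2 for the type checker, not just at runtime. A minimal sketch of that narrowing (torch.testing.assert_close stands in for the suite's assertTensorEqual):

from typing import Union

import torch

def check_pair(
    t1: Union[bool, float, int, torch.Tensor],
    t2: Union[bool, float, int, torch.Tensor],
) -> None:
    if isinstance(t1, torch.Tensor):
        assert isinstance(t2, torch.Tensor)
        # After the assert, checkers treat t2 as Tensor, so a call that
        # expects a Tensor needs no suppression comment.
        torch.testing.assert_close(t1, t2)
    else:
        assert t1 == t2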
