
Commit 8530c65

zertosh authored and facebook-github-bot committed
[codemod][fbcode/caffe2] Apply clang-format update fixes
Test Plan: Sandcastle and visual inspection.
Reviewed By: igorsugak
Differential Revision: D25849205
fbshipit-source-id: ef664c1ad4b3ee92d5c020a5511b4ef9837a09a0
1 parent d4c1684 commit 8530c65

File tree: 141 files changed (+615, -448 lines)

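Every hunk in this commit follows the same pattern: the updated clang-format no longer aligns the elements of a braced initializer list under the opening brace of the enclosing call; it breaks after the call's opening parenthesis (or after the variable's opening brace) and indents the list as a block. A minimal sketch of the shape of the change, assuming only whitespace is rewrapped; createStack appears in the tests below, while makeTensor is a hypothetical helper used purely for illustration:

// Old output: continuation lines aligned under the opening brace.
auto stack = createStack({makeTensor(1),
                          makeTensor(2),
                          makeTensor(3)});

// New output: break after '(' and indent the braced list as a block.
auto stack = createStack(
    {makeTensor(1),
     makeTensor(2),
     makeTensor(3)});

Both forms compile to the same thing; only layout changes, which is consistent with the "visual inspection" test plan above.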

test/cpp/jit/test_alias_analysis.cpp

Lines changed: 9 additions & 8 deletions
@@ -906,14 +906,15 @@ graph():
 }
 
 TEST(WildcardsTest, Basic) {
-  RegisterOperators reg({Operator(
-                             "prim::returns_wildcard(Tensor a) -> Tensor(*)",
-                             [](Stack* stack) {},
-                             aliasAnalysisFromSchema()),
-                         Operator(
-                             "prim::writes(Tensor(z!) a) -> Tensor(a)",
-                             [](Stack* stack) {},
-                             aliasAnalysisFromSchema())});
+  RegisterOperators reg(
+      {Operator(
+           "prim::returns_wildcard(Tensor a) -> Tensor(*)",
+           [](Stack* stack) {},
+           aliasAnalysisFromSchema()),
+       Operator(
+           "prim::writes(Tensor(z!) a) -> Tensor(a)",
+           [](Stack* stack) {},
+           aliasAnalysisFromSchema())});
   const auto returns_wildcard =
       Symbol::fromQualString("prim::returns_wildcard");
   const auto writes = Symbol::fromQualString("prim::writes");

test/cpp/jit/test_argument_spec.cpp

Lines changed: 24 additions & 20 deletions
@@ -50,21 +50,23 @@ TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) {
   auto const GF = at::CUDA(at::kFloat);
   auto const GD = at::CUDA(at::kDouble);
 
-  auto list = createStack({var(CF, {1}, true),
-                           var(CD, {1, 2}, false),
-                           var(GF, {}, true),
-                           var(GD, {4, 5, 6}, false),
-                           undef()});
+  auto list = createStack(
+      {var(CF, {1}, true),
+       var(CD, {1, 2}, false),
+       var(GF, {}, true),
+       var(GD, {4, 5, 6}, false),
+       undef()});
 
   // make sure we have some non-standard strides
   list[1].toTensor().transpose_(0, 1);
 
   // same list but different backing values
-  auto list2 = createStack({var(CF, {1}, true),
-                            var(CD, {1, 2}, false),
-                            var(GF, {}, true),
-                            var(GD, {4, 5, 6}, false),
-                            undef()});
+  auto list2 = createStack(
+      {var(CF, {1}, true),
+       var(CD, {1, 2}, false),
+       var(GF, {}, true),
+       var(GD, {4, 5, 6}, false),
+       undef()});
   list2[1].toTensor().transpose_(0, 1);
 
   CompleteArgumentSpec a(true, list);
@@ -142,21 +144,23 @@ TEST(ArgumentSpecTest, Basic_CUDA) {
 
   ArgumentSpecCreator arg_spec_creator(*graph);
 
-  auto list = createStack({var(CF, {1}, true),
-                           var(CD, {1, 2}, false),
-                           var(GF, {}, true),
-                           var(GD, {4, 5, 6}, false),
-                           undef()});
+  auto list = createStack(
+      {var(CF, {1}, true),
+       var(CD, {1, 2}, false),
+       var(GF, {}, true),
+       var(GD, {4, 5, 6}, false),
+       undef()});
 
   // make sure we have some non-standard strides
   list[1].toTensor().transpose_(0, 1);
 
   // same list but different backing values
-  auto list2 = createStack({var(CF, {1}, true),
-                            var(CD, {1, 2}, false),
-                            var(GF, {}, true),
-                            var(GD, {4, 5, 6}, false),
-                            undef()});
+  auto list2 = createStack(
+      {var(CF, {1}, true),
+       var(CD, {1, 2}, false),
+       var(GF, {}, true),
+       var(GD, {4, 5, 6}, false),
+       undef()});
   list2[1].toTensor().transpose_(0, 1);
 
   ArgumentSpec a = arg_spec_creator.create(true, list);

test/cpp/jit/test_gpu.cpp

Lines changed: 69 additions & 62 deletions
@@ -2681,12 +2681,13 @@ TEST(NVFuserTest, FusionBinaryOps_CUDA) {
   using OpTuple = std::tuple<AtenFuncSig, BinaryOpType, std::string>;
 
   // see [Note: explicit tuple type for uniform initialization list]
-  std::vector<OpTuple> logic_ops{OpTuple{at::eq, BinaryOpType::Eq, "eq"},
-                                 OpTuple{at::ge, BinaryOpType::GE, "ge"},
-                                 OpTuple{at::gt, BinaryOpType::GT, "gt"},
-                                 OpTuple{at::le, BinaryOpType::LE, "le"},
-                                 OpTuple{at::lt, BinaryOpType::LT, "lt"},
-                                 OpTuple{at::ne, BinaryOpType::NE, "ne"}};
+  std::vector<OpTuple> logic_ops{
+      OpTuple{at::eq, BinaryOpType::Eq, "eq"},
+      OpTuple{at::ge, BinaryOpType::GE, "ge"},
+      OpTuple{at::gt, BinaryOpType::GT, "gt"},
+      OpTuple{at::le, BinaryOpType::LE, "le"},
+      OpTuple{at::lt, BinaryOpType::LT, "lt"},
+      OpTuple{at::ne, BinaryOpType::NE, "ne"}};
 
   std::for_each(logic_ops.begin(), logic_ops.end(), [](OpTuple& op) {
     test_op(
@@ -4184,13 +4185,14 @@ TEST(NVFuserTest, FusionSoftmax1DNormalized_CUDA) {
   sub_tv3->computeAt(sum_exp_rf_tv9, -1);
   sub_tv3_copy->computeAt(output_tv7, -1);
 
-  TensorView* tensors_to_parallelize[] = {max_val_tv1,
-                                          bcast_max_tv2,
-                                          sum_exp_tv5,
-                                          bcast_sum_tv6,
-                                          output_tv7,
-                                          max_val_rf_tv8,
-                                          sum_exp_rf_tv9};
+  TensorView* tensors_to_parallelize[] = {
+      max_val_tv1,
+      bcast_max_tv2,
+      sum_exp_tv5,
+      bcast_sum_tv6,
+      output_tv7,
+      max_val_rf_tv8,
+      sum_exp_rf_tv9};
 
   for (auto tv : tensors_to_parallelize) {
     tv->axis(-1)->parallelize(ParallelType::TIDx);
@@ -4318,13 +4320,14 @@ TEST(NVFuserTest, FusionSoftmax3DNormalized_CUDA) {
   sub_tv3->computeAt(sum_exp_rf_tv9, -1);
   sub_tv3_copy->computeAt(output_tv7, -1);
 
-  TensorView* tensors_to_parallelize[] = {max_val_tv1,
-                                          bcast_max_tv2,
-                                          sum_exp_tv5,
-                                          bcast_sum_tv6,
-                                          output_tv7,
-                                          max_val_rf_tv8,
-                                          sum_exp_rf_tv9};
+  TensorView* tensors_to_parallelize[] = {
+      max_val_tv1,
+      bcast_max_tv2,
+      sum_exp_tv5,
+      bcast_sum_tv6,
+      output_tv7,
+      max_val_rf_tv8,
+      sum_exp_rf_tv9};
 
   for (auto tv : tensors_to_parallelize) {
     tv->axis(0)->parallelize(ParallelType::BIDx);
@@ -5931,15 +5934,16 @@ TEST(NVFuserTest, FusionSmemDynamicPersistentSoftmax2D_CUDA) {
   cache_x->setMemoryType(MemoryType::Shared);
   exp->setMemoryType(MemoryType::Shared);
 
-  std::vector<TensorView*> all_tensors({x,
-                                        cache_x,
-                                        max_val,
-                                        bcast_max,
-                                        x_max_sub,
-                                        exp,
-                                        sum_exp,
-                                        bcast_sum,
-                                        softmax});
+  std::vector<TensorView*> all_tensors(
+      {x,
+       cache_x,
+       max_val,
+       bcast_max,
+       x_max_sub,
+       exp,
+       sum_exp,
+       bcast_sum,
+       softmax});
 
   auto tidx = new Int();
   fusion.addInput(tidx);
@@ -6168,25 +6172,27 @@ TEST(NVFuserTest, FusionPersistentBatchNormLocalShared_CUDA) {
   std::vector<TensorView*> common_tensors(
       {x_sum, x_sum_bcast, x_mean, var_sum, var_sum_bcast, var, var_eps, rvar});
 
-  std::vector<TensorView*> static_tensors({sx,
-                                           sx_cache,
-                                           sx_sum,
-                                           sx_mean_sub,
-                                           sx_mean_sub_pow,
-                                           sx_var_sum,
-                                           sx_norm,
-                                           sx_norm_gamma,
-                                           sx_norm_gamma_beta});
-
-  std::vector<TensorView*> dynamic_tensors({dx,
-                                            dx_cache,
-                                            dx_sum,
-                                            dx_mean_sub,
-                                            dx_mean_sub_pow,
-                                            dx_var_sum,
-                                            dx_norm,
-                                            dx_norm_gamma,
-                                            dx_norm_gamma_beta});
+  std::vector<TensorView*> static_tensors(
+      {sx,
+       sx_cache,
+       sx_sum,
+       sx_mean_sub,
+       sx_mean_sub_pow,
+       sx_var_sum,
+       sx_norm,
+       sx_norm_gamma,
+       sx_norm_gamma_beta});
+
+  std::vector<TensorView*> dynamic_tensors(
+      {dx,
+       dx_cache,
+       dx_sum,
+       dx_mean_sub,
+       dx_mean_sub_pow,
+       dx_var_sum,
+       dx_norm,
+       dx_norm_gamma,
+       dx_norm_gamma_beta});
 
   std::vector<TensorView*> all_tensors;
   all_tensors.insert(
@@ -6309,20 +6315,21 @@ TEST(NVFuserTest, FusionSmemDynamicPersistentBatchNorm_CUDA) {
   cache_x->setMemoryType(MemoryType::Shared);
   x_mean_sub->setMemoryType(MemoryType::Shared);
 
-  std::vector<TensorView*> all_tensors({x_sum,
-                                        x_mean,
-                                        cache_x,
-                                        x_sum_bcast,
-                                        x_mean_sub,
-                                        x_mean_sub_pow,
-                                        var_sum,
-                                        var_sum_bcast,
-                                        var,
-                                        var_eps,
-                                        rvar,
-                                        norm,
-                                        norm_gamma,
-                                        norm_gamma_beta});
+  std::vector<TensorView*> all_tensors(
+      {x_sum,
+       x_mean,
+       cache_x,
+       x_sum_bcast,
+       x_mean_sub,
+       x_mean_sub_pow,
+       var_sum,
+       var_sum_bcast,
+       var,
+       var_eps,
+       rvar,
+       norm,
+       norm_gamma,
+       norm_gamma_beta});
 
   auto tidx = new Int();
   fusion.addInput(tidx);

test/cpp/jit/test_misc.cpp

Lines changed: 2 additions & 2 deletions
@@ -1044,7 +1044,7 @@ TEST(RecordFunctionTest, Callbacks) {
   ids.clear();
   { // START: global test
     addGlobalCallback(RecordFunctionCallback(
-        [](const RecordFunction &
+        [](const RecordFunction&
            /* unused */) -> std::unique_ptr<at::ObserverContext> {
           auto ctx = std::make_unique<TestContext>();
           ctx->a = 123;
@@ -1070,7 +1070,7 @@ TEST(RecordFunctionTest, Callbacks) {
     const int test_val = 234;
     const std::string test_str = "test thread str";
     addThreadLocalCallback(RecordFunctionCallback(
-        [](const RecordFunction &
+        [](const RecordFunction&
           /* unused */) -> std::unique_ptr<at::ObserverContext> {
          auto ctx = std::make_unique<TestContext>();
          ctx->a = 234;
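The two hunks above are likewise whitespace-only: the updated tool attaches '&' to the type name instead of leaving it floating before the wrapped parameter comment. A short hypothetical illustration (not taken from this commit), assuming the repository formats references with left alignment (e.g. PointerAlignment: Left in .clang-format); observe is an invented function name:

// Older output left a space before '&' when the parameter wrapped.
void observe(const RecordFunction &
             /* unused */);

// Updated output binds '&' to the type, matching left reference alignment.
void observe(const RecordFunction&
             /* unused */);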

test/cpp/tensorexpr/test_conv.cpp

Lines changed: 4 additions & 3 deletions
@@ -75,9 +75,10 @@ TEST(Conv, Conv2D) {
 
   at::Tensor result = at::empty_like(ref);
   te::SimpleIREvaluator cg(s, {inputB, filterB, conv});
-  cg.call({input.data_ptr<float>(),
-           filter.data_ptr<float>(),
-           result.data_ptr<float>()});
+  cg.call(
+      {input.data_ptr<float>(),
+       filter.data_ptr<float>(),
+       result.data_ptr<float>()});
 
   ASSERT_TRUE(at::allclose(ref, result, 1e-3, 1e-3));
 }

test/cpp/tensorexpr/test_llvm.cpp

Lines changed: 9 additions & 9 deletions
@@ -294,15 +294,15 @@ TEST(LLVM, LetTestMultitype) {
   std::vector<void*> args({v.data()});
   VarHandle x("x", kByte);
   VarHandle y("y", kHalf);
-  auto block =
-      Block::make({Let::make(x, 3),
-                   Let::make(y, 6.f),
-                   a.store(
-                       {0},
-                       Cast::make(
-                           kDouble,
-                           ExprHandle(2.f) +
-                               (x * ExprHandle(3.f) + y * ExprHandle(4.f))))});
+  auto block = Block::make(
+      {Let::make(x, 3),
+       Let::make(y, 6.f),
+       a.store(
+           {0},
+           Cast::make(
+               kDouble,
+               ExprHandle(2.f) +
+                   (x * ExprHandle(3.f) + y * ExprHandle(4.f))))});
 
   LLVMCodeGen cg(block, {a});
   ASSERT_EQ(cg.value<int>(args), 0);

test/cpp/tensorexpr/test_loopnest.cpp

Lines changed: 16 additions & 14 deletions
@@ -2606,8 +2606,9 @@ TEST(LoopNest, UnrollMultipleStatements) {
       x,
       0,
       kTotalSize,
-      Block::make({Store::make(a_buf, {x}, x * 2),
-                   Store::make(b_buf, {x}, Load::make(a_buf, {x}, 1))}));
+      Block::make(
+          {Store::make(a_buf, {x}, x * 2),
+           Store::make(b_buf, {x}, Load::make(a_buf, {x}, 1))}));
   Block::make({f});
   Stmt* unrolled = nullptr;
   LoopNest::unroll(f, &unrolled);
@@ -2658,9 +2659,10 @@ TEST(LoopNest, UnrollWithLet) {
       x,
       0,
       kTotalSize,
-      Block::make({Let::make(e, 7),
-                   Store::make(a_buf, {x}, e),
-                   Store::make(b_buf, {x}, e + 1)}));
+      Block::make(
+          {Let::make(e, 7),
+           Store::make(a_buf, {x}, e),
+           Store::make(b_buf, {x}, e + 1)}));
   Block::make({f});
   Stmt* unrolled = nullptr;
   LoopNest::unroll(f, &unrolled);
@@ -2700,9 +2702,9 @@ TEST(LoopNest, NormalizeStartPositive) {
   BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
   BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
   VarHandle x("x", kInt);
-  auto for_body =
-      Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
-                   Store::make(b_buf, {x}, x * 2)});
+  auto for_body = Block::make(
+      {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
+       Store::make(b_buf, {x}, x * 2)});
   auto for_stmt = For::make(x, 50, 100, for_body);
   Block::make({for_stmt});
 
@@ -2768,9 +2770,9 @@ TEST(LoopNest, NormalizeStartZero) {
   BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
   BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
   VarHandle x("x", kInt);
-  auto for_body =
-      Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
-                   Store::make(b_buf, {x}, x * 2)});
+  auto for_body = Block::make(
+      {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
+       Store::make(b_buf, {x}, x * 2)});
   auto for_stmt = For::make(x, 0, 100, for_body);
   Block::make({for_stmt});
 
@@ -2803,9 +2805,9 @@ TEST(LoopNest, NormalizeStartVariable) {
   BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
   VarHandle x("x", kInt);
   VarHandle y("y", kInt);
-  auto for_body =
-      Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
-                   Store::make(b_buf, {x}, x * 2)});
+  auto for_body = Block::make(
+      {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
+       Store::make(b_buf, {x}, x * 2)});
   auto for_stmt = For::make(x, y, 100, for_body);
   Block::make({for_stmt});
 