
Commit 637f2f3

Authored by Omar Ibrahim (moar55) and Omar Ibrahim
[CIR][CIRGen] Handle __sync_{and,or,xor}_and_fetch (#1328)
This addresses #1273. `Nand` is missing here, as I did not initially know how to implement it. I think I have since figured it out and will push it in an upcoming commit.

Co-authored-by: Omar Ibrahim <[email protected]>
1 parent: 3e17e7b · commit: 637f2f3
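For context, the GCC-style `__sync_<op>_and_fetch` builtins atomically apply the operation to the pointed-to object and return the new (post-operation) value, in contrast to `__sync_fetch_and_<op>`, which returns the old value. A minimal, illustrative C snippet of the semantics this commit lowers (not part of the patch; variable names and constants are arbitrary):

#include <assert.h>

int main(void) {
  unsigned x = 0xCu;                            /* 0b1100 */

  unsigned a = __sync_and_and_fetch(&x, 0xAu);  /* x becomes 0b1100 & 0b1010 = 0b1000 */
  assert(a == 0x8u && x == 0x8u);               /* returns the new value */

  unsigned o = __sync_or_and_fetch(&x, 0x1u);   /* x becomes 0b1000 | 0b0001 = 0b1001 */
  assert(o == 0x9u && x == 0x9u);

  unsigned r = __sync_xor_and_fetch(&x, 0xFu);  /* x becomes 0b1001 ^ 0b1111 = 0b0110 */
  assert(r == 0x6u && x == 0x6u);

  return 0;
}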

File tree: 2 files changed, +240 −5 lines

Diff for: clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp (+8 −5)

@@ -1078,8 +1078,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     bool LoseInfo = false;
     Probability.convert(llvm::APFloat::IEEEdouble(),
                         llvm::RoundingMode::Dynamic, &LoseInfo);
-    ProbAttr = mlir::FloatAttr::get(
-        mlir::Float64Type::get(&getMLIRContext()), Probability);
+    ProbAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
+                                    Probability);
   }

   auto result = builder.create<cir::ExpectOp>(getLoc(E->getSourceRange()),
@@ -1766,21 +1766,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__sync_and_and_fetch_4:
   case Builtin::BI__sync_and_and_fetch_8:
   case Builtin::BI__sync_and_and_fetch_16:
-    llvm_unreachable("BI__sync_and_and_fetch like NYI");
+    return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::And, E,
+                                cir::BinOpKind::And);

   case Builtin::BI__sync_or_and_fetch_1:
   case Builtin::BI__sync_or_and_fetch_2:
   case Builtin::BI__sync_or_and_fetch_4:
   case Builtin::BI__sync_or_and_fetch_8:
   case Builtin::BI__sync_or_and_fetch_16:
-    llvm_unreachable("BI__sync_or_and_fetch like NYI");
+    return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Or, E,
+                                cir::BinOpKind::Or);

   case Builtin::BI__sync_xor_and_fetch_1:
   case Builtin::BI__sync_xor_and_fetch_2:
   case Builtin::BI__sync_xor_and_fetch_4:
   case Builtin::BI__sync_xor_and_fetch_8:
   case Builtin::BI__sync_xor_and_fetch_16:
-    llvm_unreachable("BI__sync_xor_and_fetch like NYI");
+    return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Xor, E,
+                                cir::BinOpKind::Xor);

   case Builtin::BI__sync_nand_and_fetch_1:
   case Builtin::BI__sync_nand_and_fetch_2:
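As the commit message notes, the `Nand` cases still fall through to the NYI path below this hunk. For reference only (not part of this patch): since GCC 4.4, `__sync_nand_and_fetch(p, v)` atomically performs `*p = ~(*p & v)` and returns the new value, so rebuilding the result from the fetched old value takes an And followed by a Not rather than the single binary op used for And/Or/Xor above. An illustrative C snippet of that equivalence (variable names and constants are arbitrary):

#include <assert.h>

int main(void) {
  unsigned x = 0xF0u;
  unsigned old = x;

  /* Atomically: x = ~(x & 0x3C); returns the new value (GCC >= 4.4 semantics). */
  unsigned n = __sync_nand_and_fetch(&x, 0x3Cu);

  assert(n == ~(old & 0x3Cu));  /* new value rebuilt from the old one */
  assert(x == n);
  return 0;
}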

Diff for: clang/test/CIR/CodeGen/atomic.cpp (+232)

@@ -816,4 +816,236 @@ extern "C" void test_op_and_fetch (void)
   // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]]
   // LLVM: store i64 [[RET7]], ptr @ull, align 8
   ull = __sync_sub_and_fetch (&ull, uc);
+
+  // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i
+  // CHECK: [[RES0:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+  // CHECK: [[RET0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i
+  // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES0:%.*]] = atomicrmw and ptr @sc, i8 [[VAL0]] seq_cst, align 1
+  // LLVM: [[RET0:%.*]] = and i8 [[RES0]], [[VAL0]]
+  // LLVM: store i8 [[RET0]], ptr @sc, align 1
+  sc = __sync_and_and_fetch (&sc, uc);
+
+  // CHECK: [[RES1:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+  // CHECK: [[RET1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i
+  // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES1:%.*]] = atomicrmw and ptr @uc, i8 [[VAL1]] seq_cst, align 1
+  // LLVM: [[RET1:%.*]] = and i8 [[RES1]], [[VAL1]]
+  // LLVM: store i8 [[RET1]], ptr @uc, align 1
+  uc = __sync_and_and_fetch (&uc, uc);
+
+  // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i
+  // CHECK: [[RES2:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+  // CHECK: [[RET2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i
+  // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+  // LLVM: [[RES2:%.*]] = atomicrmw and ptr @ss, i16 [[CONV2]] seq_cst, align 2
+  // LLVM: [[RET2:%.*]] = and i16 [[RES2]], [[CONV2]]
+  // LLVM: store i16 [[RET2]], ptr @ss, align 2
+  ss = __sync_and_and_fetch (&ss, uc);
+
+  // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i
+  // CHECK: [[RES3:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+  // CHECK: [[RET3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i
+  // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+  // LLVM: [[RES3:%.*]] = atomicrmw and ptr @us, i16 [[CONV3]] seq_cst, align 2
+  // LLVM: [[RET3:%.*]] = and i16 [[RES3]], [[CONV3]]
+  // LLVM: store i16 [[RET3]], ptr @us
+  us = __sync_and_and_fetch (&us, uc);
+
+  // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i
+  // CHECK: [[RES4:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+  // CHECK: [[RET4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i
+  // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+  // LLVM: [[RES4:%.*]] = atomicrmw and ptr @si, i32 [[CONV4]] seq_cst, align 4
+  // LLVM: [[RET4:%.*]] = and i32 [[RES4]], [[CONV4]]
+  // LLVM: store i32 [[RET4]], ptr @si, align 4
+  si = __sync_and_and_fetch (&si, uc);
+
+  // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i
+  // CHECK: [[RES5:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+  // CHECK: [[RET5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i
+  // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+  // LLVM: [[RES5:%.*]] = atomicrmw and ptr @ui, i32 [[CONV5]] seq_cst, align 4
+  // LLVM: [[RET5:%.*]] = and i32 [[RES5]], [[CONV5]]
+  // LLVM: store i32 [[RET5]], ptr @ui, align 4
+  ui = __sync_and_and_fetch (&ui, uc);
+
+  // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i
+  // CHECK: [[RES6:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+  // CHECK: [[RET6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i
+  // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+  // LLVM: [[RES6:%.*]] = atomicrmw and ptr @sll, i64 [[CONV6]] seq_cst, align 8
+  // LLVM: [[RET6:%.*]] = and i64 [[RES6]], [[CONV6]]
+  // LLVM: store i64 [[RET6]], ptr @sll, align 8
+  sll = __sync_and_and_fetch (&sll, uc);
+
+  // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i
+  // CHECK: [[RES7:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+  // CHECK: [[RET7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i
+  // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+  // LLVM: [[RES7:%.*]] = atomicrmw and ptr @ull, i64 [[CONV7]] seq_cst, align 8
+  // LLVM: [[RET7:%.*]] = and i64 [[RES7]], [[CONV7]]
+  // LLVM: store i64 [[RET7]], ptr @ull, align 8
+  ull = __sync_and_and_fetch (&ull, uc);
+
+  // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i
+  // CHECK: [[RES0:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+  // CHECK: [[RET0:%.*]] = cir.binop(or, [[RES0]], [[VAL0]]) : !s8i
+  // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES0:%.*]] = atomicrmw or ptr @sc, i8 [[VAL0]] seq_cst, align 1
+  // LLVM: [[RET0:%.*]] = or i8 [[RES0]], [[VAL0]]
+  // LLVM: store i8 [[RET0]], ptr @sc, align 1
+  sc = __sync_or_and_fetch (&sc, uc);
+
+  // CHECK: [[RES1:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+  // CHECK: [[RET1:%.*]] = cir.binop(or, [[RES1]], [[VAL1]]) : !u8i
+  // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES1:%.*]] = atomicrmw or ptr @uc, i8 [[VAL1]] seq_cst, align 1
+  // LLVM: [[RET1:%.*]] = or i8 [[RES1]], [[VAL1]]
+  // LLVM: store i8 [[RET1]], ptr @uc, align 1
+  uc = __sync_or_and_fetch (&uc, uc);
+
+  // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i
+  // CHECK: [[RES2:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+  // CHECK: [[RET2:%.*]] = cir.binop(or, [[RES2]], [[VAL2]]) : !s16i
+  // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+  // LLVM: [[RES2:%.*]] = atomicrmw or ptr @ss, i16 [[CONV2]] seq_cst, align 2
+  // LLVM: [[RET2:%.*]] = or i16 [[RES2]], [[CONV2]]
+  // LLVM: store i16 [[RET2]], ptr @ss, align 2
+  ss = __sync_or_and_fetch (&ss, uc);
+
+  // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i
+  // CHECK: [[RES3:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+  // CHECK: [[RET3:%.*]] = cir.binop(or, [[RES3]], [[VAL3]]) : !u16i
+  // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+  // LLVM: [[RES3:%.*]] = atomicrmw or ptr @us, i16 [[CONV3]] seq_cst, align 2
+  // LLVM: [[RET3:%.*]] = or i16 [[RES3]], [[CONV3]]
+  // LLVM: store i16 [[RET3]], ptr @us
+  us = __sync_or_and_fetch (&us, uc);
+
+  // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i
+  // CHECK: [[RES4:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+  // CHECK: [[RET4:%.*]] = cir.binop(or, [[RES4]], [[VAL4]]) : !s32i
+  // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+  // LLVM: [[RES4:%.*]] = atomicrmw or ptr @si, i32 [[CONV4]] seq_cst, align 4
+  // LLVM: [[RET4:%.*]] = or i32 [[RES4]], [[CONV4]]
+  // LLVM: store i32 [[RET4]], ptr @si, align 4
+  si = __sync_or_and_fetch (&si, uc);
+
+  // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i
+  // CHECK: [[RES5:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+  // CHECK: [[RET5:%.*]] = cir.binop(or, [[RES5]], [[VAL5]]) : !u32i
+  // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+  // LLVM: [[RES5:%.*]] = atomicrmw or ptr @ui, i32 [[CONV5]] seq_cst, align 4
+  // LLVM: [[RET5:%.*]] = or i32 [[RES5]], [[CONV5]]
+  // LLVM: store i32 [[RET5]], ptr @ui, align 4
+  ui = __sync_or_and_fetch (&ui, uc);
+
+  // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i
+  // CHECK: [[RES6:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+  // CHECK: [[RET6:%.*]] = cir.binop(or, [[RES6]], [[VAL6]]) : !s64i
+  // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+  // LLVM: [[RES6:%.*]] = atomicrmw or ptr @sll, i64 [[CONV6]] seq_cst, align 8
+  // LLVM: [[RET6:%.*]] = or i64 [[RES6]], [[CONV6]]
+  // LLVM: store i64 [[RET6]], ptr @sll, align 8
+  sll = __sync_or_and_fetch (&sll, uc);
+
+  // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i
+  // CHECK: [[RES7:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+  // CHECK: [[RET7:%.*]] = cir.binop(or, [[RES7]], [[VAL7]]) : !u64i
+  // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+  // LLVM: [[RES7:%.*]] = atomicrmw or ptr @ull, i64 [[CONV7]] seq_cst, align 8
+  // LLVM: [[RET7:%.*]] = or i64 [[RES7]], [[CONV7]]
+  // LLVM: store i64 [[RET7]], ptr @ull, align 8
+  ull = __sync_or_and_fetch (&ull, uc);
+
+  // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i
+  // CHECK: [[RES0:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+  // CHECK: [[RET0:%.*]] = cir.binop(xor, [[RES0]], [[VAL0]]) : !s8i
+  // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES0:%.*]] = atomicrmw xor ptr @sc, i8 [[VAL0]] seq_cst, align 1
+  // LLVM: [[RET0:%.*]] = xor i8 [[RES0]], [[VAL0]]
+  // LLVM: store i8 [[RET0]], ptr @sc, align 1
+  sc = __sync_xor_and_fetch (&sc, uc);
+
+  // CHECK: [[RES1:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+  // CHECK: [[RET1:%.*]] = cir.binop(xor, [[RES1]], [[VAL1]]) : !u8i
+  // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[RES1:%.*]] = atomicrmw xor ptr @uc, i8 [[VAL1]] seq_cst, align 1
+  // LLVM: [[RET1:%.*]] = xor i8 [[RES1]], [[VAL1]]
+  // LLVM: store i8 [[RET1]], ptr @uc, align 1
+  uc = __sync_xor_and_fetch (&uc, uc);
+
+  // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i
+  // CHECK: [[RES2:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+  // CHECK: [[RET2:%.*]] = cir.binop(xor, [[RES2]], [[VAL2]]) : !s16i
+  // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+  // LLVM: [[RES2:%.*]] = atomicrmw xor ptr @ss, i16 [[CONV2]] seq_cst, align 2
+  // LLVM: [[RET2:%.*]] = xor i16 [[RES2]], [[CONV2]]
+  // LLVM: store i16 [[RET2]], ptr @ss, align 2
+  ss = __sync_xor_and_fetch (&ss, uc);
+
+  // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i
+  // CHECK: [[RES3:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+  // CHECK: [[RET3:%.*]] = cir.binop(xor, [[RES3]], [[VAL3]]) : !u16i
+  // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+  // LLVM: [[RES3:%.*]] = atomicrmw xor ptr @us, i16 [[CONV3]] seq_cst, align 2
+  // LLVM: [[RET3:%.*]] = xor i16 [[RES3]], [[CONV3]]
+  // LLVM: store i16 [[RET3]], ptr @us
+  us = __sync_xor_and_fetch (&us, uc);
+
+  // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i
+  // CHECK: [[RES4:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+  // CHECK: [[RET4:%.*]] = cir.binop(xor, [[RES4]], [[VAL4]]) : !s32i
+  // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+  // LLVM: [[RES4:%.*]] = atomicrmw xor ptr @si, i32 [[CONV4]] seq_cst, align 4
+  // LLVM: [[RET4:%.*]] = xor i32 [[RES4]], [[CONV4]]
+  // LLVM: store i32 [[RET4]], ptr @si, align 4
+  si = __sync_xor_and_fetch (&si, uc);
+
+  // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i
+  // CHECK: [[RES5:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+  // CHECK: [[RET5:%.*]] = cir.binop(xor, [[RES5]], [[VAL5]]) : !u32i
+  // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+  // LLVM: [[RES5:%.*]] = atomicrmw xor ptr @ui, i32 [[CONV5]] seq_cst, align 4
+  // LLVM: [[RET5:%.*]] = xor i32 [[RES5]], [[CONV5]]
+  // LLVM: store i32 [[RET5]], ptr @ui, align 4
+  ui = __sync_xor_and_fetch (&ui, uc);
+
+  // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i
+  // CHECK: [[RES6:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+  // CHECK: [[RET6:%.*]] = cir.binop(xor, [[RES6]], [[VAL6]]) : !s64i
+  // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+  // LLVM: [[RES6:%.*]] = atomicrmw xor ptr @sll, i64 [[CONV6]] seq_cst, align 8
+  // LLVM: [[RET6:%.*]] = xor i64 [[RES6]], [[CONV6]]
+  // LLVM: store i64 [[RET6]], ptr @sll, align 8
+  sll = __sync_xor_and_fetch (&sll, uc);
+
+  // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i
+  // CHECK: [[RES7:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+  // CHECK: [[RET7:%.*]] = cir.binop(xor, [[RES7]], [[VAL7]]) : !u64i
+  // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1
+  // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+  // LLVM: [[RES7:%.*]] = atomicrmw xor ptr @ull, i64 [[CONV7]] seq_cst, align 8
+  // LLVM: [[RET7:%.*]] = xor i64 [[RES7]], [[CONV7]]
+  // LLVM: store i64 [[RET7]], ptr @ull, align 8
+  ull = __sync_xor_and_fetch (&ull, uc);
+
 }
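A note on the pattern the new checks match: `atomicrmw` yields the value the memory held before the operation, so the `*_and_fetch` result is rebuilt by repeating the operation on that old value. That is the `fetch_first` + `cir.binop` pairing in the CHECK lines and the `atomicrmw` + plain binop pairing in the LLVM lines. An illustrative C snippet of the same decomposition using the `__atomic` builtins (not part of the test; variable names are arbitrary):

#include <assert.h>

int main(void) {
  unsigned obj = 0xFFu, val = 0x0Fu;

  /* Fetch-first: returns the value held *before* the and, like atomicrmw. */
  unsigned old_val = __atomic_fetch_and(&obj, val, __ATOMIC_SEQ_CST);

  /* Post-op rebuilds the and_fetch result from the old value. */
  unsigned new_val = old_val & val;

  assert(old_val == 0xFFu);
  assert(new_val == obj && new_val == 0x0Fu);
  return 0;
}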
