diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 3aa170eff8606..c22cf607df7d8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -116,6 +116,15 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   mlir::Value emitPromotedComplexOperand(const Expr *e, QualType promotionTy);
 
+  LValue emitCompoundAssignLValue(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &),
+      RValue &value);
+
+  mlir::Value emitCompoundAssign(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &));
+
   mlir::Value emitBinAdd(const BinOpInfo &op);
   mlir::Value emitBinSub(const BinOpInfo &op);
   mlir::Value emitBinMul(const BinOpInfo &op);
@@ -153,6 +162,15 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   HANDLEBINOP(Sub)
   HANDLEBINOP(Mul)
 #undef HANDLEBINOP
+
+  // Compound assignments.
+  mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinAdd);
+  }
+
+  mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
+  }
 };
 } // namespace
 
@@ -166,6 +184,12 @@ static const ComplexType *getComplexType(QualType type) {
 }
 #endif // NDEBUG
 
+static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
+                                         mlir::Location loc, mlir::Value real) {
+  mlir::Value imag = builder.getNullValue(real.getType(), loc);
+  return builder.createComplexCreate(loc, real, imag);
+}
+
 LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
                                                mlir::Value &value) {
   assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -602,7 +626,7 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
   mlir::Value result = Visit(const_cast<Expr *>(e));
 
   if (!promotionTy.isNull())
-    cgf.cgm.errorNYI("emitPromoted emitPromotedValue");
+    return cgf.emitPromotedValue(result, promotionTy);
 
   return result;
 }
@@ -630,6 +654,104 @@ ComplexExprEmitter::emitBinOps(const BinaryOperator *e, QualType promotionTy) {
   return binOpInfo;
 }
 
+LValue ComplexExprEmitter::emitCompoundAssignLValue(
+    const CompoundAssignOperator *e,
+    mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &), RValue &value) {
+  QualType lhsTy = e->getLHS()->getType();
+  QualType rhsTy = e->getRHS()->getType();
+  SourceLocation exprLoc = e->getExprLoc();
+  mlir::Location loc = cgf.getLoc(exprLoc);
+
+  if (lhsTy->getAs<AtomicType>()) {
+    cgf.cgm.errorNYI("emitCompoundAssignLValue AtomicType");
+    return {};
+  }
+
+  BinOpInfo opInfo{loc};
+  opInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+  // Load the RHS and LHS operands.
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen a little.
+  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
+  opInfo.ty = promotionTypeCR.isNull() ? e->getComputationResultType()
+                                       : promotionTypeCR;
+
+  QualType complexElementTy =
+      opInfo.ty->castAs<ComplexType>()->getElementType();
+  QualType promotionTypeRHS = getPromotionType(rhsTy);
+
+  // The RHS should have been converted to the computation type.
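+  // A real-typed RHS is widened into a complex value with a zero imaginary
+  // part (via createComplexFromReal above) before the complex binary op.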
+ if (e->getRHS()->getType()->isRealFloatingType()) { + if (!promotionTypeRHS.isNull()) { + opInfo.rhs = createComplexFromReal( + cgf.getBuilder(), loc, + cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS)); + } else { + assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy)); + opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc, + cgf.emitScalarExpr(e->getRHS())); + } + } else { + if (!promotionTypeRHS.isNull()) { + opInfo.rhs = cgf.emitPromotedComplexExpr(e->getRHS(), promotionTypeRHS); + } else { + assert(cgf.getContext().hasSameUnqualifiedType(opInfo.ty, rhsTy)); + opInfo.rhs = Visit(e->getRHS()); + } + } + + LValue lhs = cgf.emitLValue(e->getLHS()); + + // Load from the l-value and convert it. + QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType()); + if (lhsTy->isAnyComplexType()) { + mlir::Value lhsValue = emitLoadOfLValue(lhs, exprLoc); + QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS; + opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc); + } else { + cgf.cgm.errorNYI("emitCompoundAssignLValue emitLoadOfScalar"); + return {}; + } + + // Expand the binary operator. + mlir::Value result = (this->*func)(opInfo); + + // Truncate the result and store it into the LHS lvalue. + if (lhsTy->isAnyComplexType()) { + mlir::Value resultValue = + emitComplexToComplexCast(result, opInfo.ty, lhsTy, exprLoc); + emitStoreOfComplex(loc, resultValue, lhs, /*isInit*/ false); + value = RValue::getComplex(resultValue); + } else { + mlir::Value resultValue = + cgf.emitComplexToScalarConversion(result, opInfo.ty, lhsTy, exprLoc); + cgf.emitStoreOfScalar(resultValue, lhs, /*isInit*/ false); + value = RValue::get(resultValue); + } + + return lhs; +} + +mlir::Value ComplexExprEmitter::emitCompoundAssign( + const CompoundAssignOperator *e, + mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &)) { + RValue val; + LValue lv = emitCompoundAssignLValue(e, func, val); + + // The result of an assignment in C is the assigned r-value. + if (!cgf.getLangOpts().CPlusPlus) + return val.getComplexValue(); + + // If the lvalue is non-volatile, return the computed value of the assignment. 
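+  // For a volatile-qualified lvalue, reload instead so the extra volatile
+  // access stays observable.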
+  if (!lv.isVolatileQualified())
+    return val.getComplexValue();
+
+  return emitLoadOfLValue(lv, e->getExprLoc());
+}
+
 mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
   assert(!cir::MissingFeatures::fastMathFlags());
   assert(!cir::MissingFeatures::cgFPOptionsRAII());
@@ -685,6 +807,31 @@ mlir::Value CIRGenFunction::emitComplexExpr(const Expr *e) {
   return ComplexExprEmitter(*this).Visit(const_cast<Expr *>(e));
 }
 
+using CompoundFunc =
+    mlir::Value (ComplexExprEmitter::*)(const ComplexExprEmitter::BinOpInfo &);
+
+static CompoundFunc getComplexOp(BinaryOperatorKind op) {
+  switch (op) {
+  case BO_MulAssign:
+    llvm_unreachable("getComplexOp: BO_MulAssign");
+  case BO_DivAssign:
+    llvm_unreachable("getComplexOp: BO_DivAssign");
+  case BO_SubAssign:
+    return &ComplexExprEmitter::emitBinSub;
+  case BO_AddAssign:
+    return &ComplexExprEmitter::emitBinAdd;
+  default:
+    llvm_unreachable("unexpected complex compound assignment");
+  }
+}
+
+LValue CIRGenFunction::emitComplexCompoundAssignmentLValue(
+    const CompoundAssignOperator *e) {
+  CompoundFunc op = getComplexOp(e->getOpcode());
+  RValue val;
+  return ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, val);
+}
+
 mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
                                                      LValue lv,
                                                      cir::UnaryOpKind op,
@@ -729,3 +876,11 @@ mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *e,
                                                     QualType promotionType) {
   return ComplexExprEmitter(*this).emitPromoted(e, promotionType);
 }
+
+mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
+                                              QualType promotionType) {
+  assert(!mlir::cast<cir::ComplexType>(result.getType()).isIntegerComplex() &&
+         "integral complex will never be promoted");
+  return builder.createCast(cir::CastKind::float_complex, result,
+                            convertType(promotionType));
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 32c1c1ada3c53..3e06513bb6cf0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1955,6 +1955,29 @@ mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
                                 .emitScalarConversion(src, srcTy, dstTy, loc);
 }
 
+mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
+                                                          QualType srcTy,
+                                                          QualType dstTy,
+                                                          SourceLocation loc) {
+  assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
+         "Invalid complex -> scalar conversion");
+
+  QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
+  if (dstTy->isBooleanType()) {
+    auto kind = complexElemTy->isFloatingType()
+                    ? cir::CastKind::float_complex_to_bool
+                    : cir::CastKind::int_complex_to_bool;
+    return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
+  }
+
+  auto kind = complexElemTy->isFloatingType()
+                  ? cir::CastKind::float_complex_to_real
+                  : cir::CastKind::int_complex_to_real;
+  mlir::Value real =
+      builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
+  return emitScalarConversion(real, complexElemTy, dstTy, loc);
+}
+
 mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
   // Perform vector logical not on comparison with zero vector.
   if (e->getType()->isVectorType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index f8e73475dc78a..d0fc8938e7be9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -784,9 +784,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
     }
     if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
-    cgm.errorNYI(e->getSourceRange(),
-                 "CompoundAssignOperator with ComplexType");
-    return LValue();
+
+    return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
   }
   case Expr::CallExprClass:
   case Expr::CXXMemberCallExprClass:
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 68d54bb966cdb..4f39ee974fff1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -944,6 +944,11 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// sanitizer is enabled, a runtime check is also emitted.
   mlir::Value emitCheckedArgForAssume(const Expr *e);
 
+  /// Emit a conversion from the specified complex type to the specified
+  /// destination type, where the destination type is an LLVM scalar type.
+  mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
+                                            QualType dstTy, SourceLocation loc);
+
   LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);
 
   LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
@@ -1047,6 +1052,8 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
 
+  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
+
   /// Emit the computation of the specified expression of scalar type.
   mlir::Value emitScalarExpr(const clang::Expr *e);
 
@@ -1076,6 +1083,7 @@ class CIRGenFunction : public CIRGenTypeCache {
                                        cir::UnaryOpKind op, bool isPre);
 
   LValue emitComplexAssignmentLValue(const BinaryOperator *e);
+  LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
 
   void emitCompoundStmt(const clang::CompoundStmt &s);
diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.cpp b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
index 5e384cd34ebfd..fbb0335106f88 100644
--- a/clang/test/CIR/CodeGen/complex-arithmetic.cpp
+++ b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
@@ -11,16 +11,16 @@ void foo() {
   int _Complex c = a + b;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
@@ -30,16 +30,16 @@ void foo() {
 // LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[ADD_IMAG]], 1
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL:.*]] = add i32 %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG:.*]] = add i32 %[[A_IMAG]], %[[B_IMAG]]
@@ -54,16 +54,16 @@ void foo2() {
   float _Complex c = a + b;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -73,16 +73,16 @@ void foo2() {
 // LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[ADD_IMAG]], 1
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
@@ -98,23 +98,23 @@ void foo3() {
   float _Complex d = (a + b) + c;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
 // CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
-// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[C_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD_A_B_C:.*]] = cir.complex.add %[[ADD_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
 // CIR: cir.store{{.*}} %[[ADD_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -123,7 +123,7 @@ void foo3() {
 // LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
 // LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[ADD_IMAG_A_B]], 1
-// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[C_ADDR]], align 4
 // LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
 // LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
 // LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
@@ -134,23 +134,23 @@ void foo3() {
 // LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[ADD_IMAG_A_B_C]], 1
 // LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[C_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
-// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
 // OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
-// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[ADD_REAL_A_B]], %[[C_REAL]]
 // OGCG: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[ADD_IMAG_A_B]], %[[C_IMAG]]
@@ -165,17 +165,17 @@ void foo4() {
   int _Complex c = a - b;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
@@ -184,18 +184,18 @@ void foo4() {
 // LLVM: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[SUB_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[SUB_IMAG]], 1
-// LLVM: store { i32, i32 } %[[RESULT_2]], ptr %[[COMPLEX_C]], align 4
+// LLVM: store { i32, i32 } %[[RESULT_2]], ptr %[[C_ADDR]], align 4
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL:.*]] = sub i32 %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
@@ -210,16 +210,16 @@ void foo5() {
   float _Complex c = a - b;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -229,16 +229,16 @@ void foo5() {
 // LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[SUB_IMAG]], 1
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
@@ -254,23 +254,23 @@ void foo6() {
   float _Complex d = (a - b) - c;
 }
 
-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
 // CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB_A_B:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
-// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[C_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB_A_B_C:.*]] = cir.complex.sub %[[SUB_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
 // CIR: cir.store{{.*}} %[[SUB_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -279,7 +279,7 @@ void foo6() {
 // LLVM: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B]], 0
 // LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[SUB_IMAG_A_B]], 1
-// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[C_ADDR]], align 4
 // LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
 // LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
 // LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
@@ -290,23 +290,23 @@ void foo6() {
 // LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[SUB_IMAG_A_B_C]], 1
 // LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4
 
-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[C_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL_A_B:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
-// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
 // OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
-// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL_A_B_C:.*]] = fsub float %[[SUB_REAL_A_B]], %[[C_REAL]]
 // OGCG: %[[SUB_IMAG_A_B_C:.*]] = fsub float %[[SUB_IMAG_A_B]], %[[C_IMAG]]
@@ -314,3 +314,4 @@ void foo6() {
 // OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
 // OGCG: store float %[[SUB_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
 // OGCG: store float %[[SUB_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4
+
diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
new file mode 100644
index 0000000000000..35a8aa693f8ed
--- /dev/null
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -0,0 +1,288 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR,CXX_CIR
+// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM,CXX_LLVM
+// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG,CXX_OGCG
+// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void foo() {
+  float _Complex a;
+  float _Complex b;
+  b += a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[ADD_REAL_A_B:.*]] = fadd float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[ADD_A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[ADD_A_B]], float %[[ADD_IMAG_A_B]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[ADD_IMAG:.*]] = fadd float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[ADD_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo1() {
+  float _Complex a;
+  float _Complex b;
+  b -= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.sub %[[TMP_B]], %[[TMP_A]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[SUB_REAL_A_B:.*]] = fsub float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[SUB_IMAG_A_B:.*]] = fsub float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[SUB_A_B:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[SUB_A_B]], float %[[SUB_IMAG_A_B]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL:.*]] = fsub float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[SUB_IMAG:.*]] = fsub float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[SUB_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[SUB_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo2() {
+  int _Complex a;
+  int _Complex b;
+  b += a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[ADD_REAL_A_B:.*]] = add i32 %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[ADD_IMAG_A_B:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[ADD_A_B:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL_A_B]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[ADD_A_B]], i32 %[[ADD_IMAG_A_B]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = add i32 %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[ADD_IMAG:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[ADD_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store i32 %[[ADD_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo3() {
+  _Float16 _Complex a;
+  _Float16 _Complex b;
+  b += a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float
+// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[B_COMPLEX_F32]], %[[A_COMPLEX_F32]] : !cir.complex<!cir.float>
+// CIR: %[[ADD_REAL:.*]] = cir.complex.real %[[ADD_A_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[ADD_IMAG:.*]] = cir.complex.imag %[[ADD_A_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[ADD_REAL_F16:.*]] = cir.cast(floating, %[[ADD_REAL]] : !cir.float), !cir.f16
+// CIR: %[[ADD_IMAG_F16:.*]] = cir.cast(floating, %[[ADD_IMAG]] : !cir.float), !cir.f16
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[ADD_REAL_F16]], %[[ADD_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[TMP_B:.*]] = load { half, half }, ptr %[[B_ADDR]], align 2
+// LLVM: %[[B_REAL:.*]] = extractvalue { half, half } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { half, half } %[[TMP_B]], 1
+// LLVM: %[[B_REAL_F32:.*]] = fpext half %[[B_REAL]] to float
+// LLVM: %[[B_IMAG_F32:.*]] = fpext half %[[B_IMAG]] to float
+// LLVM: %[[TMP_B_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[B_REAL_F32]], 0
+// LLVM: %[[B_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_B_COMPLEX_F32]], float %[[B_IMAG_F32]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[B_COMPLEX_F32]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[B_COMPLEX_F32]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[A_COMPLEX_F32]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[A_COMPLEX_F32]], 1
+// LLVM: %[[ADD_REAL:.*]] = fadd float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[ADD_IMAG]], 1
+// LLVM: %[[RESULT_REAL:.*]] = extractvalue { float, float } %[[RESULT]], 0
+// LLVM: %[[RESULT_IMAG:.*]] = extractvalue { float, float } %[[RESULT]], 1
+// LLVM: %[[RESULT_REAL_F16:.*]] = fptrunc float %[[RESULT_REAL]] to half
+// LLVM: %[[RESULT_IMAG_F16:.*]] = fptrunc float %[[RESULT_IMAG]] to half
+// LLVM: %[[TMP_RESULT_F16:.*]] = insertvalue { half, half } undef, half %[[RESULT_REAL_F16]], 0
+// LLVM: %[[RESULT_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_F16]], half %[[RESULT_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load half, ptr %[[B_REAL_PTR]], align 2
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load half, ptr %[[B_IMAG_PTR]], align 2
+// OGCG: %[[B_REAL_F32:.*]] = fpext half %[[B_REAL]] to float
+// OGCG: %[[B_IMAG_F32:.*]] = fpext half %[[B_IMAG]] to float
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[B_REAL_F32]], %[[A_REAL_F32]]
+// OGCG: %[[ADD_IMAG:.*]] = fadd float %[[B_IMAG_F32]], %[[A_IMAG_F32]]
+// OGCG: %[[ADD_REAL_F16:.*]] = fptrunc float %[[ADD_REAL]] to half
+// OGCG: %[[ADD_IMAG_F16:.*]] = fptrunc float %[[ADD_IMAG]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[ADD_REAL_F16]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[ADD_IMAG_F16]], ptr %[[B_IMAG_PTR]], align 2
+
+#ifdef __cplusplus
+void foo4() {
+  volatile _Complex int a;
+  volatile _Complex int b;
+  int _Complex c = b += a;
+}
+#endif
+
+// CXX_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CXX_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CXX_CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c", init]
+// CXX_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex<!s32i>
+// CXX_CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: cir.store{{.*}} %[[TMP_B]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// CXX_LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// CXX_LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// CXX_LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// CXX_LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// CXX_LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// CXX_LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// CXX_LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// CXX_LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// CXX_LLVM: %[[ADD_REAL:.*]] = add i32 %[[B_REAL]], %[[A_REAL]]
+// CXX_LLVM: %[[ADD_IMAG:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]]
+// CXX_LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
+// CXX_LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[ADD_IMAG]], 1
+// CXX_LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// CXX_LLVM: store { i32, i32 } %[[TMP_B]], ptr %[[C_ADDR]], align 4
+
+// CXX_OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// CXX_OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// CXX_OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// CXX_OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// CXX_OGCG: %[[A_REAL:.*]] = load volatile i32, ptr %[[A_REAL_PTR]], align 4
+// CXX_OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// CXX_OGCG: %[[A_IMAG:.*]] = load volatile i32, ptr %[[A_IMAG_PTR]], align 4
+// CXX_OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// CXX_OGCG: %[[B_REAL:.*]] = load volatile i32, ptr %[[B_REAL_PTR]], align 4
+// CXX_OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// CXX_OGCG: %[[B_IMAG:.*]] = load volatile i32, ptr %[[B_IMAG_PTR]], align 4
+// CXX_OGCG: %[[ADD_REAL:.*]] = add i32 %[[B_REAL]], %[[A_REAL]]
+// CXX_OGCG: %[[ADD_IMAG:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]]
+// CXX_OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// CXX_OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// CXX_OGCG: store volatile i32 %[[ADD_REAL]], ptr %[[B_REAL_PTR]], align 4
+// CXX_OGCG: store volatile i32 %[[ADD_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+// CXX_OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// CXX_OGCG: %[[B_REAL:.*]] = load volatile i32, ptr %[[B_REAL_PTR]], align 4
+// CXX_OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// CXX_OGCG: %[[B_IMAG:.*]] = load volatile i32, ptr %[[B_IMAG_PTR]], align 4
+// CXX_OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// CXX_OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// CXX_OGCG: store i32 %[[B_REAL]], ptr %[[C_REAL_PTR]], align 4
+// CXX_OGCG: store i32 %[[B_IMAG]], ptr %[[C_IMAG_PTR]], align 4
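
// Usage sketch (illustrative only, not part of the patch; function name is
// hypothetical). With this change, ClangIR lowers += and -= on _Complex
// operands through cir.complex.add / cir.complex.sub, as the tests above
// exercise; *= and /= still fall into the NYI/unreachable paths in
// getComplexOp, and atomic lvalues hit errorNYI.
//
// void sketch(void) {
//   float _Complex a, b;
//   int _Complex c, d;
//   b += a; // lowered via cir.complex.add on !cir.complex<!cir.float>
//   d -= c; // lowered via cir.complex.sub on !cir.complex<!s32i>
// }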